diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index fe6d7b59eb3b2..5a508fa106537 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -37,10 +37,7 @@ apply plugin: 'application' archivesBaseName = 'elasticsearch-benchmarks' mainClassName = 'org.openjdk.jmh.Main' -// never try to invoke tests on the benchmark project - there aren't any -check.dependsOn.remove(test) -// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip -task test(type: Test, overwrite: true) +test.enabled = false dependencies { compile("org.elasticsearch:elasticsearch:${version}") { @@ -59,7 +56,6 @@ compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-u // enable the JMH's BenchmarkProcessor to generate the final benchmark classes // needs to be added separately otherwise Gradle will quote it and javac will fail compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) -compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" forbiddenApis { // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index e955d5d507ca2..dfa58592a182e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -253,19 +253,15 @@ public class PluginBuildPlugin extends BuildPlugin { protected void addNoticeGeneration(Project project) { File licenseFile = project.pluginProperties.extension.licenseFile if (licenseFile != null) { - project.bundlePlugin.into('/') { - from(licenseFile.parentFile) { - include(licenseFile.name) - } + project.bundlePlugin.from(licenseFile.parentFile) { + include(licenseFile.name) } } File noticeFile = project.pluginProperties.extension.licenseFile if (noticeFile != null) { NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class) generateNotice.dependencies(project) - project.bundlePlugin.into('/') { - from(generateNotice) - } + project.bundlePlugin.from(generateNotice) } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 3fc622ef5aa4f..c3dff77dfd496 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -51,22 +51,18 @@ class ClusterFormationTasks { * * Returns a list of NodeInfo objects for each node in the cluster. */ - static List setup(Project project, Task task, ClusterConfiguration config) { - if (task.getEnabled() == false) { - // no need to add cluster formation tasks if the task won't run! 
- return - } + static List setup(Project project, String prefix, Task runner, ClusterConfiguration config) { File sharedDir = new File(project.buildDir, "cluster/shared") // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk // such that snapshots survive failures / test runs and there is no simple way today to fix that. - Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) { + Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) { delete sharedDir doLast { sharedDir.mkdirs() } } - List startTasks = [cleanup] + List startTasks = [] List nodes = [] if (config.numNodes < config.numBwcNodes) { throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]") @@ -75,7 +71,7 @@ class ClusterFormationTasks { throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0") } // this is our current version distribution configuration we use for all kinds of REST tests etc. - String distroConfigName = "${task.name}_elasticsearchDistro" + String distroConfigName = "${prefix}_elasticsearchDistro" Configuration currentDistro = project.configurations.create(distroConfigName) configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) if (config.bwcVersion != null && config.numBwcNodes > 0) { @@ -89,7 +85,7 @@ class ClusterFormationTasks { } configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion) for (Map.Entry entry : config.plugins.entrySet()) { - configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(), + configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(), project.configurations.elasticsearchBwcPlugins, config.bwcVersion) } project.configurations.elasticsearchBwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) @@ -104,13 +100,14 @@ class ClusterFormationTasks { elasticsearchVersion = config.bwcVersion distro = project.configurations.elasticsearchBwcDistro } - NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir) + NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir) nodes.add(node) - startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0))) + Task dependsOn = startTasks.empty ? cleanup : startTasks.get(0) + startTasks.add(configureNode(project, prefix, runner, dependsOn, node, distro, nodes.get(0))) } - Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks) - task.dependsOn(wait) + Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks) + runner.dependsOn(wait) return nodes } @@ -150,58 +147,58 @@ class ClusterFormationTasks { * * @return a task which starts the node. 
*/ - static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) { + static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) { // tasks are chained so their execution order is maintained - Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) { + Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) { delete node.homeDir delete node.cwd doLast { node.cwd.mkdirs() } } - setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node) - setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration) - setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode) + setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) + setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) + setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration) + setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode) if (node.config.plugins.isEmpty() == false) { if (node.nodeVersion == VersionProperties.elasticsearch) { - setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node) + setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node) } else { - setup = configureCopyBwcPluginsTask(taskName(task, node, 'copyBwcPlugins'), project, setup, node) + setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node) } } // install modules for (Project module : node.config.modules) { String actionName = pluginTaskName('install', module.name, 'Module') - setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module) + setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module) } // install plugins for (Map.Entry plugin : node.config.plugins.entrySet()) { String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin') - setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue()) + setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue()) } // sets up any extra config files that need to be copied over to the ES instance; // its run after plugins have been installed, as the extra config files may belong to plugins - setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node) + setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) // extra setup commands for (Map.Entry command : node.config.setupCommands.entrySet()) { // the first argument is the actual script name, relative to home Object[] args = command.getValue().clone() args[0] = new File(node.homeDir, args[0].toString()) - setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, args) + setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args) } - Task start = configureStartTask(taskName(task, node, 'start'), 
project, setup, node) + Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node) if (node.config.daemonize) { - Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node) + Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node) // if we are running in the background, make sure to stop the server when the task completes - task.finalizedBy(stop) + runner.finalizedBy(stop) start.finalizedBy(stop) } return start @@ -648,11 +645,11 @@ class ClusterFormationTasks { } /** Returns a unique task name for this task and node configuration */ - static String taskName(Task parentTask, NodeInfo node, String action) { + static String taskName(String prefix, NodeInfo node, String action) { if (node.config.numNodes > 1) { - return "${parentTask.name}#node${node.nodeNum}.${action}" + return "${prefix}#node${node.nodeNum}.${action}" } else { - return "${parentTask.name}#${action}" + return "${prefix}#${action}" } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 73f32961fb33e..59c65c684acee 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test import org.apache.tools.ant.taskdefs.condition.Os import org.gradle.api.InvalidUserDataException import org.gradle.api.Project -import org.gradle.api.Task /** * A container for the files and configuration associated with a single node in a test cluster. @@ -96,17 +95,17 @@ class NodeInfo { /** the version of elasticsearch that this node runs */ String nodeVersion - /** Creates a node to run as part of a cluster for the given task */ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) { + /** Holds node configuration for part of a test cluster. 
*/ + NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum this.sharedDir = sharedDir if (config.clusterName != null) { clusterName = config.clusterName } else { - clusterName = "${task.path.replace(':', '_').substring(1)}" + clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix } - baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}") + baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') this.nodeVersion = nodeVersion homeDir = homeDir(baseDir, config.distribution, nodeVersion) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 51bccb4fe7580..075e8129e6fa8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test import com.carrotsearch.gradle.junit4.RandomizedTestingTask import org.elasticsearch.gradle.BuildPlugin +import org.gradle.api.DefaultTask import org.gradle.api.Task import org.gradle.api.internal.tasks.options.Option import org.gradle.api.plugins.JavaBasePlugin @@ -27,12 +28,15 @@ import org.gradle.api.tasks.Input import org.gradle.util.ConfigureUtil /** - * Runs integration tests, but first starts an ES cluster, - * and passes the ES cluster info as parameters to the tests. + * A wrapper task around setting up a cluster and running rest tests. */ -public class RestIntegTestTask extends RandomizedTestingTask { +public class RestIntegTestTask extends DefaultTask { - ClusterConfiguration clusterConfig + protected ClusterConfiguration clusterConfig + + protected RandomizedTestingTask runner + + protected Task clusterInit /** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */ List nodes @@ -44,35 +48,44 @@ public class RestIntegTestTask extends RandomizedTestingTask { public RestIntegTestTask() { description = 'Runs rest tests against an elasticsearch cluster.' 
group = JavaBasePlugin.VERIFICATION_GROUP - dependsOn(project.testClasses) - classpath = project.sourceSets.test.runtimeClasspath - testClassesDir = project.sourceSets.test.output.classesDir - clusterConfig = new ClusterConfiguration(project) + runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class) + super.dependsOn(runner) + clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses) + runner.dependsOn(clusterInit) + runner.classpath = project.sourceSets.test.runtimeClasspath + runner.testClassesDir = project.sourceSets.test.output.classesDir + clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) // start with the common test configuration - configure(BuildPlugin.commonTestConfig(project)) + runner.configure(BuildPlugin.commonTestConfig(project)) // override/add more for rest tests - parallelism = '1' - include('**/*IT.class') - systemProperty('tests.rest.load_packaged', 'false') + runner.parallelism = '1' + runner.include('**/*IT.class') + runner.systemProperty('tests.rest.load_packaged', 'false') // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node - systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") - systemProperty('tests.config.dir', "${-> nodes[0].confDir}") + runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") + runner.systemProperty('tests.config.dir', "${-> nodes[0].confDir}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops - systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") // copy the rest spec/tests into the test resources RestSpecHack.configureDependencies(project) project.afterEvaluate { - dependsOn(RestSpecHack.configureTask(project, includePackaged)) + runner.dependsOn(RestSpecHack.configureTask(project, includePackaged)) } // this must run after all projects have been configured, so we know any project // references can be accessed as a fully configured project.gradle.projectsEvaluated { - nodes = ClusterFormationTasks.setup(project, this, clusterConfig) + if (enabled == false) { + runner.enabled = false + clusterInit.enabled = false + return // no need to add cluster formation tasks if the task won't run! + } + nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig) + super.dependsOn(runner.finalizedBy) } } @@ -84,25 +97,16 @@ public class RestIntegTestTask extends RandomizedTestingTask { clusterConfig.debug = enabled; } - @Input - public void cluster(Closure closure) { - ConfigureUtil.configure(closure, clusterConfig) - } - - public ClusterConfiguration getCluster() { - return clusterConfig - } - public List getNodes() { return nodes } @Override public Task dependsOn(Object... 
dependencies) { - super.dependsOn(dependencies) + runner.dependsOn(dependencies) for (Object dependency : dependencies) { if (dependency instanceof Fixture) { - finalizedBy(((Fixture)dependency).stopTask) + runner.finalizedBy(((Fixture)dependency).stopTask) } } return this @@ -110,11 +114,16 @@ public class RestIntegTestTask extends RandomizedTestingTask { @Override public void setDependsOn(Iterable dependencies) { - super.setDependsOn(dependencies) + runner.setDependsOn(dependencies) for (Object dependency : dependencies) { if (dependency instanceof Fixture) { - finalizedBy(((Fixture)dependency).stopTask) + runner.finalizedBy(((Fixture)dependency).stopTask) } } } + + @Override + public Task mustRunAfter(Object... tasks) { + clusterInit.mustRunAfter(tasks) + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy index 47a559efccbd3..f00be89f6aed3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestTestPlugin.groovy @@ -43,7 +43,7 @@ public class RestTestPlugin implements Plugin { } RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class) - integTest.cluster.distribution = 'zip' // rest tests should run with the real zip + integTest.clusterConfig.distribution = 'zip' // rest tests should run with the real zip integTest.mustRunAfter(project.precommit) project.check.dependsOn(integTest) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index a71dc59dbf914..a88152d7865ff 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -18,7 +18,7 @@ public class RunTask extends DefaultTask { clusterConfig.daemonize = false clusterConfig.distribution = 'zip' project.afterEvaluate { - ClusterFormationTasks.setup(project, this, clusterConfig) + ClusterFormationTasks.setup(project, name, this, clusterConfig) } } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 54f148894dabf..309fd865a22ec 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -157,7 +157,6 @@ - diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java index 1e09e890a0b67..77e7cdab93707 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java @@ -53,6 +53,6 @@ protected void doExecute(SearchRequest request, ActionListener l new SearchHit[0], 0L, 0.0f), new InternalAggregations(Collections.emptyList()), new Suggest(Collections.emptyList()), - new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0])); + new SearchProfileShardResults(Collections.emptyMap()), false, false, 1), "", 1, 1, 0, new ShardSearchFailure[0])); } } diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index c9f989aad5067..ecba2953c9499 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -28,16 +28,27 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import java.io.ByteArrayOutputStream; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Locale; @@ -73,6 +84,127 @@ static Request ping() { return new Request("HEAD", "/", Collections.emptyMap(), null); } + static Request bulk(BulkRequest bulkRequest) throws IOException { + Params parameters = Params.builder(); + parameters.withTimeout(bulkRequest.timeout()); + parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); + + // Bulk API only supports newline delimited JSON or Smile. Before executing + // the bulk, we need to check that all requests have the same content-type + // and this content-type is supported by the Bulk API. 
+ XContentType bulkContentType = null; + for (int i = 0; i < bulkRequest.numberOfActions(); i++) { + DocWriteRequest request = bulkRequest.requests().get(i); + + DocWriteRequest.OpType opType = request.opType(); + if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { + bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType); + + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = (UpdateRequest) request; + if (updateRequest.doc() != null) { + bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); + } + if (updateRequest.upsertRequest() != null) { + bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType); + } + } + } + + if (bulkContentType == null) { + bulkContentType = XContentType.JSON; + } + + byte separator = bulkContentType.xContent().streamSeparator(); + ContentType requestContentType = ContentType.create(bulkContentType.mediaType()); + + ByteArrayOutputStream content = new ByteArrayOutputStream(); + for (DocWriteRequest request : bulkRequest.requests()) { + DocWriteRequest.OpType opType = request.opType(); + + try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { + metadata.startObject(); + { + metadata.startObject(opType.getLowercase()); + if (Strings.hasLength(request.index())) { + metadata.field("_index", request.index()); + } + if (Strings.hasLength(request.type())) { + metadata.field("_type", request.type()); + } + if (Strings.hasLength(request.id())) { + metadata.field("_id", request.id()); + } + if (Strings.hasLength(request.routing())) { + metadata.field("_routing", request.routing()); + } + if (Strings.hasLength(request.parent())) { + metadata.field("_parent", request.parent()); + } + if (request.version() != Versions.MATCH_ANY) { + metadata.field("_version", request.version()); + } + + VersionType versionType = request.versionType(); + if (versionType != VersionType.INTERNAL) { + if (versionType == VersionType.EXTERNAL) { + metadata.field("_version_type", "external"); + } else if (versionType == VersionType.EXTERNAL_GTE) { + metadata.field("_version_type", "external_gte"); + } else if (versionType == VersionType.FORCE) { + metadata.field("_version_type", "force"); + } + } + + if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { + IndexRequest indexRequest = (IndexRequest) request; + if (Strings.hasLength(indexRequest.getPipeline())) { + metadata.field("pipeline", indexRequest.getPipeline()); + } + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = (UpdateRequest) request; + if (updateRequest.retryOnConflict() > 0) { + metadata.field("_retry_on_conflict", updateRequest.retryOnConflict()); + } + if (updateRequest.fetchSource() != null) { + metadata.field("_source", updateRequest.fetchSource()); + } + } + metadata.endObject(); + } + metadata.endObject(); + + BytesRef metadataSource = metadata.bytes().toBytesRef(); + content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length); + content.write(separator); + } + + BytesRef source = null; + if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { + IndexRequest indexRequest = (IndexRequest) request; + BytesReference indexSource = indexRequest.source(); + XContentType indexXContentType = indexRequest.getContentType(); + + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, indexSource, 
indexXContentType)) { + try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) { + builder.copyCurrentStructure(parser); + source = builder.bytes().toBytesRef(); + } + } + } else if (opType == DocWriteRequest.OpType.UPDATE) { + source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef(); + } + + if (source != null) { + content.write(source.bytes, source.offset, source.length); + content.write(separator); + } + } + + HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType); + return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity); + } + static Request exists(GetRequest getRequest) { Request request = get(getRequest); return new Request(HttpHead.METHOD_NAME, request.endpoint, request.params, null); @@ -118,6 +250,48 @@ static Request index(IndexRequest indexRequest) { return new Request(method, endpoint, parameters.getParams(), entity); } + static Request update(UpdateRequest updateRequest) throws IOException { + String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update"); + + Params parameters = Params.builder(); + parameters.withRouting(updateRequest.routing()); + parameters.withParent(updateRequest.parent()); + parameters.withTimeout(updateRequest.timeout()); + parameters.withRefreshPolicy(updateRequest.getRefreshPolicy()); + parameters.withWaitForActiveShards(updateRequest.waitForActiveShards()); + parameters.withDocAsUpsert(updateRequest.docAsUpsert()); + parameters.withFetchSourceContext(updateRequest.fetchSource()); + parameters.withRetryOnConflict(updateRequest.retryOnConflict()); + parameters.withVersion(updateRequest.version()); + parameters.withVersionType(updateRequest.versionType()); + + // The Java API allows update requests with different content types + // set for the partial document and the upsert document. This client + // only accepts update requests that have the same content types set + // for both doc and upsert. + XContentType xContentType = null; + if (updateRequest.doc() != null) { + xContentType = updateRequest.doc().getContentType(); + } + if (updateRequest.upsertRequest() != null) { + XContentType upsertContentType = updateRequest.upsertRequest().getContentType(); + if ((xContentType != null) && (xContentType != upsertContentType)) { + throw new IllegalStateException("Update request cannot have different content types for doc [" + xContentType + "]" + + " and upsert [" + upsertContentType + "] documents"); + } else { + xContentType = upsertContentType; + } + } + if (xContentType == null) { + xContentType = Requests.INDEX_CONTENT_TYPE; + } + + BytesRef source = XContentHelper.toXContent(updateRequest, xContentType, false).toBytesRef(); + HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType())); + + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity); + } + /** * Utility method to build request's endpoint. 
*/ @@ -160,6 +334,13 @@ Params putParam(String key, TimeValue value) { return this; } + Params withDocAsUpsert(boolean docAsUpsert) { + if (docAsUpsert) { + return putParam("doc_as_upsert", Boolean.TRUE.toString()); + } + return this; + } + Params withFetchSourceContext(FetchSourceContext fetchSourceContext) { if (fetchSourceContext != null) { if (fetchSourceContext.fetchSource() == false) { @@ -203,7 +384,14 @@ Params withRefresh(boolean refresh) { Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { - putParam("refresh", refreshPolicy.getValue()); + return putParam("refresh", refreshPolicy.getValue()); + } + return this; + } + + Params withRetryOnConflict(int retryOnConflict) { + if (retryOnConflict > 0) { + return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); } return this; } @@ -252,4 +440,26 @@ static Params builder() { return new Params(); } } + + /** + * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms + * to the current {@link BulkRequest}'s content type (if it's known at the time of this method get called). + * + * @return the {@link IndexRequest}'s content type + */ + static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { + XContentType requestContentType = indexRequest.getContentType(); + if (requestContentType != XContentType.JSON && requestContentType != XContentType.SMILE) { + throw new IllegalArgumentException("Unsupported content-type found for request with content-type [" + requestContentType + + "], only JSON and SMILE are supported"); + } + if (xContentType == null) { + return requestContentType; + } + if (requestContentType != xContentType) { + throw new IllegalArgumentException("Mismatching content-type found for request with content-type [" + requestContentType + + "], previous requests have content-type [" + xContentType + "]"); + } + return xContentType; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 8e29ff7a5a10a..e174e2fffe6c0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,11 +26,15 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -39,10 +43,8 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.util.Collections; import java.util.Objects; import java.util.Set; -import java.util.function.Function; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; @@ -59,6 
+61,24 @@ public RestHighLevelClient(RestClient client) { this.client = Objects.requireNonNull(client); } + /** + * Executes a bulk request using the Bulk API + * + * See Bulk API on elastic.co + */ + public BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously executes a bulk request using the Bulk API + * + * See Bulk API on elastic.co + */ + public void bulkAsync(BulkRequest bulkRequest, ActionListener listener, Header... headers) { + performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers); + } + /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise */ @@ -121,14 +141,35 @@ public void indexAsync(IndexRequest indexRequest, ActionListener performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers); } - private Resp performRequestAndParseEntity(Req request, Function requestConverter, - CheckedFunction entityParser, Set ignores, Header... headers) throws IOException { - return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers); + /** + * Updates a document using the Update API + *
+ * See Update API on elastic.co + */ + public UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously updates a document using the Update API + *
+ * See Update API on elastic.co + */ + public void updateAsync(UpdateRequest updateRequest, ActionListener<UpdateResponse> listener, Header... headers) { + performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers); } - <Req extends ActionRequest, Resp> Resp performRequest(Req request, Function<Req, Request> requestConverter, - CheckedFunction<Response, Resp, IOException> responseConverter, Set<Integer> ignores, Header... headers) throws IOException { + private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request, + CheckedFunction<Req, Request, IOException> requestConverter, + CheckedFunction<XContentParser, Resp, IOException> entityParser, + Set<Integer> ignores, Header... headers) throws IOException { + return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers); + } + <Req extends ActionRequest, Resp> Resp performRequest(Req request, + CheckedFunction<Req, Request, IOException> requestConverter, + CheckedFunction<Response, Resp, IOException> responseConverter, + Set<Integer> ignores, Header... headers) throws IOException { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { throw validationException; @@ -154,22 +195,31 @@ Resp performRequest(Req request, Function - private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request, Function<Req, Request> requestConverter, - CheckedFunction<XContentParser, Resp, IOException> entityParser, ActionListener<Resp> listener, - Set<Integer> ignores, Header... headers) { + private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request, + CheckedFunction<Req, Request, IOException> requestConverter, + CheckedFunction<XContentParser, Resp, IOException> entityParser, + ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) { performRequestAsync(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), listener, ignores, headers); } - <Req extends ActionRequest, Resp> void performRequestAsync(Req request, Function<Req, Request> requestConverter, - CheckedFunction<Response, Resp, IOException> responseConverter, ActionListener<Resp> listener, - Set<Integer> ignores, Header... headers) { + <Req extends ActionRequest, Resp> void performRequestAsync(Req request, + CheckedFunction<Req, Request, IOException> requestConverter, + CheckedFunction<Response, Resp, IOException> responseConverter, + ActionListener<Resp> listener, Set<Integer> ignores, Header... 
headers) { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); return; } - Request req = requestConverter.apply(request); + Request req; + try { + req = requestConverter.apply(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } + ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); client.performRequestAsync(req.method, req.endpoint, req.params, req.entity, responseListener, headers); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index b6649cacb0778..4686a23b86851 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -25,22 +25,32 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.get.GetResult; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; import java.util.Collections; import java.util.Map; -import static org.hamcrest.CoreMatchers.containsString; +import static java.util.Collections.singletonMap; public class CrudIT extends ESRestHighLevelClientTestCase { @@ -262,4 +272,253 @@ public void testIndex() throws IOException { "version conflict, document already exists (current version [1])]", exception.getMessage()); } } + + public void testUpdate() throws IOException { + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "does_not_exist"); + updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]", + exception.getMessage()); + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + indexRequest.source(singletonMap("field", "value")); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.doc(singletonMap("field", "updated"), 
randomFrom(XContentType.values())); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion()); + + UpdateRequest updateRequestConflict = new UpdateRequest("index", "type", "id"); + updateRequestConflict.doc(singletonMap("field", "with_version_conflict"), randomFrom(XContentType.values())); + updateRequestConflict.version(indexResponse.getVersion()); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> + execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync)); + assertEquals(RestStatus.CONFLICT, exception.status()); + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " + + "current version [2] is different than the one provided [1]]", exception.getMessage()); + } + { + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); + if (randomBoolean()) { + updateRequest.parent("missing"); + } else { + updateRequest.routing("missing"); + } + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + }); + + assertEquals(RestStatus.NOT_FOUND, exception.status()); + assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][id]: document missing]", + exception.getMessage()); + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "with_script"); + indexRequest.source(singletonMap("counter", 12)); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_script"); + Script script = new Script(ScriptType.INLINE, "painless", "ctx._source.counter += params.count", singletonMap("count", 8)); + updateRequest.script(script); + updateRequest.fetchSource(true); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + assertEquals(2L, updateResponse.getVersion()); + assertEquals(20, updateResponse.getGetResult().sourceAsMap().get("counter")); + + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "with_doc"); + indexRequest.source("field_1", "one", "field_3", "three"); + indexRequest.version(12L); + indexRequest.versionType(VersionType.EXTERNAL); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + assertEquals(12L, indexResponse.getVersion()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc"); + updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values())); + updateRequest.fetchSource("field_*", "field_3"); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + assertEquals(13L, updateResponse.getVersion()); + GetResult 
getResult = updateResponse.getGetResult(); + assertEquals(13L, updateResponse.getVersion()); + Map sourceAsMap = getResult.sourceAsMap(); + assertEquals("one", sourceAsMap.get("field_1")); + assertEquals("two", sourceAsMap.get("field_2")); + assertFalse(sourceAsMap.containsKey("field_3")); + } + { + IndexRequest indexRequest = new IndexRequest("index", "type", "noop"); + indexRequest.source("field", "value"); + IndexResponse indexResponse = highLevelClient().index(indexRequest); + assertEquals(RestStatus.CREATED, indexResponse.status()); + assertEquals(1L, indexResponse.getVersion()); + + UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop"); + updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); + assertEquals(1L, updateResponse.getVersion()); + + updateRequest.detectNoop(false); + + updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + assertEquals(2L, updateResponse.getVersion()); + } + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_upsert"); + updateRequest.upsert(singletonMap("doc_status", "created")); + updateRequest.doc(singletonMap("doc_status", "updated")); + updateRequest.fetchSource(true); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.CREATED, updateResponse.status()); + assertEquals("index", updateResponse.getIndex()); + assertEquals("type", updateResponse.getType()); + assertEquals("with_upsert", updateResponse.getId()); + GetResult getResult = updateResponse.getGetResult(); + assertEquals(1L, updateResponse.getVersion()); + assertEquals("created", getResult.sourceAsMap().get("doc_status")); + } + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_doc_as_upsert"); + updateRequest.doc(singletonMap("field", "initialized")); + updateRequest.fetchSource(true); + updateRequest.docAsUpsert(true); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.CREATED, updateResponse.status()); + assertEquals("index", updateResponse.getIndex()); + assertEquals("type", updateResponse.getType()); + assertEquals("with_doc_as_upsert", updateResponse.getId()); + GetResult getResult = updateResponse.getGetResult(); + assertEquals(1L, updateResponse.getVersion()); + assertEquals("initialized", getResult.sourceAsMap().get("field")); + } + { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_scripted_upsert"); + updateRequest.fetchSource(true); + updateRequest.script(new Script(ScriptType.INLINE, "painless", "ctx._source.level = params.test", singletonMap("test", "C"))); + updateRequest.scriptedUpsert(true); + updateRequest.upsert(singletonMap("level", "A")); + + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.CREATED, updateResponse.status()); + assertEquals("index", updateResponse.getIndex()); + assertEquals("type", updateResponse.getType()); + 
assertEquals("with_scripted_upsert", updateResponse.getId()); + + GetResult getResult = updateResponse.getGetResult(); + assertEquals(1L, updateResponse.getVersion()); + assertEquals("C", getResult.sourceAsMap().get("level")); + } + { + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON)); + updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML)); + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + }); + assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", + exception.getMessage()); + } + } + + public void testBulk() throws IOException { + int nbItems = randomIntBetween(10, 100); + boolean[] errors = new boolean[nbItems]; + + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < nbItems; i++) { + String id = String.valueOf(i); + boolean erroneous = randomBoolean(); + errors[i] = erroneous; + + DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); + if (opType == DocWriteRequest.OpType.DELETE) { + if (erroneous == false) { + assertEquals(RestStatus.CREATED, + highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + } + DeleteRequest deleteRequest = new DeleteRequest("index", "test", id); + bulkRequest.add(deleteRequest); + + } else { + BytesReference source = XContentBuilder.builder(xContentType.xContent()).startObject().field("id", i).endObject().bytes(); + if (opType == DocWriteRequest.OpType.INDEX) { + IndexRequest indexRequest = new IndexRequest("index", "test", id).source(source, xContentType); + if (erroneous) { + indexRequest.version(12L); + } + bulkRequest.add(indexRequest); + + } else if (opType == DocWriteRequest.OpType.CREATE) { + IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true); + if (erroneous) { + assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status()); + } + bulkRequest.add(createRequest); + + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = new UpdateRequest("index", "test", id) + .doc(new IndexRequest().source(source, xContentType)); + if (erroneous == false) { + assertEquals(RestStatus.CREATED, + highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + } + bulkRequest.add(updateRequest); + } + } + } + + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync); + assertEquals(RestStatus.OK, bulkResponse.status()); + assertTrue(bulkResponse.getTookInMillis() > 0); + assertEquals(nbItems, bulkResponse.getItems().length); + + for (int i = 0; i < nbItems; i++) { + BulkItemResponse bulkItemResponse = bulkResponse.getItems()[i]; + + assertEquals(i, bulkItemResponse.getItemId()); + assertEquals("index", bulkItemResponse.getIndex()); + assertEquals("test", bulkItemResponse.getType()); + assertEquals(String.valueOf(i), bulkItemResponse.getId()); + + DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType(); + if (requestOpType == DocWriteRequest.OpType.INDEX || requestOpType == DocWriteRequest.OpType.CREATE) { + assertEquals(errors[i], 
bulkItemResponse.isFailed()); + assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.CREATED, bulkItemResponse.status()); + } else if (requestOpType == DocWriteRequest.OpType.UPDATE) { + assertEquals(errors[i], bulkItemResponse.isFailed()); + assertEquals(errors[i] ? RestStatus.INTERNAL_SERVER_ERROR : RestStatus.OK, bulkItemResponse.status()); + } else if (requestOpType == DocWriteRequest.OpType.DELETE) { + assertFalse(bulkItemResponse.isFailed()); + assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status()); + } + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index f9bf4cc1a391a..1d61ef87c485e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -22,25 +22,39 @@ import org.apache.http.HttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.script.Script; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.RandomObjects; import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.function.Consumer; import java.util.function.Function; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.client.Request.enforceSameContentType; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + public class RequestTests extends ESTestCase { public void testPing() { @@ -121,43 +135,7 @@ private static void getAndExistsTest(Function requestConver expectedParams.put("stored_fields", storedFieldsParam.toString()); } if (randomBoolean()) { - if (randomBoolean()) { - boolean fetchSource = randomBoolean(); - getRequest.fetchSourceContext(new FetchSourceContext(fetchSource)); - if (fetchSource == false) { - expectedParams.put("_source", "false"); - } - } else { - int numIncludes = randomIntBetween(0, 5); - String[] includes = new String[numIncludes]; - StringBuilder includesParam = new StringBuilder(); - for (int i = 0; i < numIncludes; i++) { - String include = randomAsciiOfLengthBetween(3, 10); - includes[i] = include; - includesParam.append(include); - if (i < numIncludes - 1) { - includesParam.append(","); - } - } - if (numIncludes > 0) { - expectedParams.put("_source_include", includesParam.toString()); - } - int numExcludes = 
randomIntBetween(0, 5); - String[] excludes = new String[numExcludes]; - StringBuilder excludesParam = new StringBuilder(); - for (int i = 0; i < numExcludes; i++) { - String exclude = randomAsciiOfLengthBetween(3, 10); - excludes[i] = exclude; - excludesParam.append(exclude); - if (i < numExcludes - 1) { - excludesParam.append(","); - } - } - if (numExcludes > 0) { - expectedParams.put("_source_exclude", excludesParam.toString()); - } - getRequest.fetchSourceContext(new FetchSourceContext(true, includes, excludes)); - } + randomizeFetchSourceContextParams(getRequest::fetchSourceContext, expectedParams); } } Request request = requestConverter.apply(getRequest); @@ -271,6 +249,325 @@ public void testIndex() throws IOException { } } + public void testUpdate() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + + Map expectedParams = new HashMap<>(); + String index = randomAsciiOfLengthBetween(3, 10); + String type = randomAsciiOfLengthBetween(3, 10); + String id = randomAsciiOfLengthBetween(3, 10); + + UpdateRequest updateRequest = new UpdateRequest(index, type, id); + updateRequest.detectNoop(randomBoolean()); + + if (randomBoolean()) { + BytesReference source = RandomObjects.randomSource(random(), xContentType); + updateRequest.doc(new IndexRequest().source(source, xContentType)); + + boolean docAsUpsert = randomBoolean(); + updateRequest.docAsUpsert(docAsUpsert); + if (docAsUpsert) { + expectedParams.put("doc_as_upsert", "true"); + } + } else { + updateRequest.script(new Script("_value + 1")); + updateRequest.scriptedUpsert(randomBoolean()); + } + if (randomBoolean()) { + BytesReference source = RandomObjects.randomSource(random(), xContentType); + updateRequest.upsert(new IndexRequest().source(source, xContentType)); + } + if (randomBoolean()) { + String routing = randomAsciiOfLengthBetween(3, 10); + updateRequest.routing(routing); + expectedParams.put("routing", routing); + } + if (randomBoolean()) { + String parent = randomAsciiOfLengthBetween(3, 10); + updateRequest.parent(parent); + expectedParams.put("parent", parent); + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + updateRequest.timeout(timeout); + expectedParams.put("timeout", timeout); + } else { + expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep()); + } + if (randomBoolean()) { + WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); + updateRequest.setRefreshPolicy(refreshPolicy); + if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { + expectedParams.put("refresh", refreshPolicy.getValue()); + } + } + if (randomBoolean()) { + int waitForActiveShards = randomIntBetween(0, 10); + updateRequest.waitForActiveShards(waitForActiveShards); + expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards)); + } + if (randomBoolean()) { + long version = randomLong(); + updateRequest.version(version); + if (version != Versions.MATCH_ANY) { + expectedParams.put("version", Long.toString(version)); + } + } + if (randomBoolean()) { + VersionType versionType = randomFrom(VersionType.values()); + updateRequest.versionType(versionType); + if (versionType != VersionType.INTERNAL) { + expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT)); + } + } + if (randomBoolean()) { + int retryOnConflict = randomIntBetween(0, 5); + updateRequest.retryOnConflict(retryOnConflict); + if (retryOnConflict > 0) { + expectedParams.put("retry_on_conflict", String.valueOf(retryOnConflict)); + } + } + 
if (randomBoolean()) { + randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams); + } + + Request request = Request.update(updateRequest); + assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.endpoint); + assertEquals(expectedParams, request.params); + assertEquals("POST", request.method); + + HttpEntity entity = request.entity; + assertNotNull(entity); + assertTrue(entity instanceof ByteArrayEntity); + + UpdateRequest parsedUpdateRequest = new UpdateRequest(); + + XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue()); + try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { + parsedUpdateRequest.fromXContent(parser); + } + + assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert()); + assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert()); + assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop()); + assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource()); + assertEquals(updateRequest.script(), parsedUpdateRequest.script()); + if (updateRequest.doc() != null) { + assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType); + } else { + assertNull(parsedUpdateRequest.doc()); + } + if (updateRequest.upsertRequest() != null) { + assertToXContentEquivalent(updateRequest.upsertRequest().source(), parsedUpdateRequest.upsertRequest().source(), xContentType); + } else { + assertNull(parsedUpdateRequest.upsertRequest()); + } + } + + public void testUpdateWithDifferentContentTypes() throws IOException { + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + UpdateRequest updateRequest = new UpdateRequest(); + updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON)); + updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML)); + Request.update(updateRequest); + }); + assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", + exception.getMessage()); + } + + public void testBulk() throws IOException { + Map expectedParams = new HashMap<>(); + + BulkRequest bulkRequest = new BulkRequest(); + if (randomBoolean()) { + String timeout = randomTimeValue(); + bulkRequest.timeout(timeout); + expectedParams.put("timeout", timeout); + } else { + expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep()); + } + + if (randomBoolean()) { + WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); + bulkRequest.setRefreshPolicy(refreshPolicy); + if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { + expectedParams.put("refresh", refreshPolicy.getValue()); + } + } + + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + + int nbItems = randomIntBetween(10, 100); + for (int i = 0; i < nbItems; i++) { + String index = randomAsciiOfLength(5); + String type = randomAsciiOfLength(5); + String id = randomAsciiOfLength(5); + + BytesReference source = RandomObjects.randomSource(random(), xContentType); + DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); + + DocWriteRequest docWriteRequest = null; + if (opType == DocWriteRequest.OpType.INDEX) { + IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType); + docWriteRequest = indexRequest; + if 
(randomBoolean()) { + indexRequest.setPipeline(randomAsciiOfLength(5)); + } + if (randomBoolean()) { + indexRequest.parent(randomAsciiOfLength(5)); + } + } else if (opType == DocWriteRequest.OpType.CREATE) { + IndexRequest createRequest = new IndexRequest(index, type, id).source(source, xContentType).create(true); + docWriteRequest = createRequest; + if (randomBoolean()) { + createRequest.parent(randomAsciiOfLength(5)); + } + } else if (opType == DocWriteRequest.OpType.UPDATE) { + final UpdateRequest updateRequest = new UpdateRequest(index, type, id).doc(new IndexRequest().source(source, xContentType)); + docWriteRequest = updateRequest; + if (randomBoolean()) { + updateRequest.retryOnConflict(randomIntBetween(1, 5)); + } + if (randomBoolean()) { + randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>()); + } + if (randomBoolean()) { + updateRequest.parent(randomAsciiOfLength(5)); + } + } else if (opType == DocWriteRequest.OpType.DELETE) { + docWriteRequest = new DeleteRequest(index, type, id); + } + + if (randomBoolean()) { + docWriteRequest.routing(randomAsciiOfLength(10)); + } + if (randomBoolean()) { + docWriteRequest.version(randomNonNegativeLong()); + } + if (randomBoolean()) { + docWriteRequest.versionType(randomFrom(VersionType.values())); + } + bulkRequest.add(docWriteRequest); + } + + Request request = Request.bulk(bulkRequest); + assertEquals("/_bulk", request.endpoint); + assertEquals(expectedParams, request.params); + assertEquals("POST", request.method); + + byte[] content = new byte[(int) request.entity.getContentLength()]; + try (InputStream inputStream = request.entity.getContent()) { + Streams.readFully(inputStream, content); + } + + BulkRequest parsedBulkRequest = new BulkRequest(); + parsedBulkRequest.add(content, 0, content.length, xContentType); + assertEquals(bulkRequest.numberOfActions(), parsedBulkRequest.numberOfActions()); + + for (int i = 0; i < bulkRequest.numberOfActions(); i++) { + DocWriteRequest originalRequest = bulkRequest.requests().get(i); + DocWriteRequest parsedRequest = parsedBulkRequest.requests().get(i); + + assertEquals(originalRequest.opType(), parsedRequest.opType()); + assertEquals(originalRequest.index(), parsedRequest.index()); + assertEquals(originalRequest.type(), parsedRequest.type()); + assertEquals(originalRequest.id(), parsedRequest.id()); + assertEquals(originalRequest.routing(), parsedRequest.routing()); + assertEquals(originalRequest.parent(), parsedRequest.parent()); + assertEquals(originalRequest.version(), parsedRequest.version()); + assertEquals(originalRequest.versionType(), parsedRequest.versionType()); + + DocWriteRequest.OpType opType = originalRequest.opType(); + if (opType == DocWriteRequest.OpType.INDEX) { + IndexRequest indexRequest = (IndexRequest) originalRequest; + IndexRequest parsedIndexRequest = (IndexRequest) parsedRequest; + + assertEquals(indexRequest.getPipeline(), parsedIndexRequest.getPipeline()); + assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), xContentType); + } else if (opType == DocWriteRequest.OpType.UPDATE) { + UpdateRequest updateRequest = (UpdateRequest) originalRequest; + UpdateRequest parsedUpdateRequest = (UpdateRequest) parsedRequest; + + assertEquals(updateRequest.retryOnConflict(), parsedUpdateRequest.retryOnConflict()); + assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource()); + if (updateRequest.doc() != null) { + assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType); 
+ } else { + assertNull(parsedUpdateRequest.doc()); + } + } + } + } + + public void testBulkWithDifferentContentTypes() throws IOException { + { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new DeleteRequest("index", "type", "0")); + bulkRequest.add(new UpdateRequest("index", "type", "1").script(new Script("test"))); + bulkRequest.add(new DeleteRequest("index", "type", "2")); + + Request request = Request.bulk(bulkRequest); + assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue()); + } + { + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new DeleteRequest("index", "type", "0")); + bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType)); + bulkRequest.add(new DeleteRequest("index", "type", "2")); + + Request request = Request.bulk(bulkRequest); + assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + } + { + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + UpdateRequest updateRequest = new UpdateRequest("index", "type", "0"); + if (randomBoolean()) { + updateRequest.doc(new IndexRequest().source(singletonMap("field", "value"), xContentType)); + } else { + updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType)); + } + + Request request = Request.bulk(new BulkRequest().add(updateRequest)); + assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue()); + } + { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE)); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + assertEquals("Mismatching content-type found for request with content-type [JSON], " + + "previous requests have content-type [SMILE]", exception.getMessage()); + } + { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("index", "type", "0") + .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "1") + .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new UpdateRequest("index", "type", "2") + .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON)) + .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)) + ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + assertEquals("Mismatching content-type found for request with content-type [SMILE], " + + "previous requests have content-type [JSON]", exception.getMessage()); + } + { + XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML); + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new DeleteRequest("index", "type", "0")); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new DeleteRequest("index", "type", "2")); + bulkRequest.add(new DeleteRequest("index", "type", "3")); + bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new 
IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest)); + assertEquals("Unsupported content-type found for request with content-type [" + xContentType + + "], only JSON and SMILE are supported", exception.getMessage()); + } + } + public void testParams() { final int nbParams = randomIntBetween(0, 10); Request.Params params = Request.Params.builder(); @@ -306,5 +603,77 @@ public void testEndpoint() { assertEquals("/a/b", Request.endpoint("a", "b")); assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create")); assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create")); + assertEquals("/a/_create", Request.endpoint("a", null, null, "_create")); + } + + public void testEnforceSameContentType() { + XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), xContentType); + assertEquals(xContentType, enforceSameContentType(indexRequest, null)); + assertEquals(xContentType, enforceSameContentType(indexRequest, xContentType)); + + XContentType bulkContentType = randomBoolean() ? xContentType : null; + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> + enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType)); + assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported", + exception.getMessage()); + + exception = expectThrows(IllegalArgumentException.class, () -> + enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType)); + assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported", + exception.getMessage()); + + XContentType requestContentType = xContentType == XContentType.JSON ? XContentType.SMILE : XContentType.JSON; + + exception = expectThrows(IllegalArgumentException.class, () -> + enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); + assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], " + + "previous requests have content-type [" + xContentType + "]", exception.getMessage()); + } + + /** + * Randomize the {@link FetchSourceContext} request parameters. 
+ */ + private static void randomizeFetchSourceContextParams(Consumer consumer, Map expectedParams) { + if (randomBoolean()) { + if (randomBoolean()) { + boolean fetchSource = randomBoolean(); + consumer.accept(new FetchSourceContext(fetchSource)); + if (fetchSource == false) { + expectedParams.put("_source", "false"); + } + } else { + int numIncludes = randomIntBetween(0, 5); + String[] includes = new String[numIncludes]; + StringBuilder includesParam = new StringBuilder(); + for (int i = 0; i < numIncludes; i++) { + String include = randomAsciiOfLengthBetween(3, 10); + includes[i] = include; + includesParam.append(include); + if (i < numIncludes - 1) { + includesParam.append(","); + } + } + if (numIncludes > 0) { + expectedParams.put("_source_include", includesParam.toString()); + } + int numExcludes = randomIntBetween(0, 5); + String[] excludes = new String[numExcludes]; + StringBuilder excludesParam = new StringBuilder(); + for (int i = 0; i < numExcludes; i++) { + String exclude = randomAsciiOfLengthBetween(3, 10); + excludes[i] = exclude; + excludesParam.append(exclude); + if (i < numExcludes - 1) { + excludesParam.append(","); + } + } + if (numExcludes > 0) { + expectedParams.put("_source_exclude", excludesParam.toString()); + } + consumer.accept(new FetchSourceContext(true, includes, excludes)); + } + } } } \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b92ce34f93c04..5962ffd48b4c8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -239,7 +239,8 @@ public void testParseResponseException() throws IOException { public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); @@ -260,7 +261,8 @@ public void testPerformRequestOnSuccess() throws IOException { public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); @@ -277,7 +279,8 @@ public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOExcept public void testPerformRequestOnResponseExceptionWithEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = 
randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", @@ -296,7 +299,8 @@ public void testPerformRequestOnResponseExceptionWithEntity() throws IOException public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); @@ -315,7 +319,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOExc public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); RestStatus restStatus = randomFrom(RestStatus.values()); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); @@ -334,7 +339,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -347,7 +353,8 @@ public void testPerformRequestOnResponseExceptionWithIgnores() throws IOExceptio public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -363,7 +370,8 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); - Function requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null); + CheckedFunction requestConverter = request -> + new Request("GET", "/", Collections.emptyMap(), null); HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); httpResponse.setEntity(new 
StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); diff --git a/core/build.gradle b/core/build.gradle index 6e0b94dd6f9a5..99da28e2091a3 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -74,7 +74,7 @@ dependencies { // percentiles aggregation compile 'com.tdunning:t-digest:3.0' // precentil ranks aggregation - compile 'org.hdrhistogram:HdrHistogram:2.1.6' + compile 'org.hdrhistogram:HdrHistogram:2.1.9' // lucene spatial compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional diff --git a/core/licenses/HdrHistogram-2.1.6.jar.sha1 b/core/licenses/HdrHistogram-2.1.6.jar.sha1 deleted file mode 100644 index 26fc16f2e87f0..0000000000000 --- a/core/licenses/HdrHistogram-2.1.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7495feb7f71ee124bd2a7e7d83590e296d71d80e \ No newline at end of file diff --git a/core/licenses/HdrHistogram-2.1.9.jar.sha1 b/core/licenses/HdrHistogram-2.1.9.jar.sha1 new file mode 100644 index 0000000000000..2378df07b2c0c --- /dev/null +++ b/core/licenses/HdrHistogram-2.1.9.jar.sha1 @@ -0,0 +1 @@ +e4631ce165eb400edecfa32e03d3f1be53dee754 \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 1a21082fcbb06..c3479ecc0cfff 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -304,7 +304,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. */ - protected static void parseInnerToXContent(XContentParser parser, DocWriteResponseBuilder context) throws IOException { + protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException { XContentParser.Token token = parser.currentToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); @@ -348,9 +348,11 @@ protected static void parseInnerToXContent(XContentParser parser, DocWriteRespon } /** - * {@link DocWriteResponseBuilder} is used to build {@link DocWriteResponse} objects during XContent parsing. + * Base class of all {@link DocWriteResponse} builders. These {@link DocWriteResponse.Builder} are used during + * xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to + * instantiate the appropriate {@link DocWriteResponse} with the parsed values. 
*/ - public abstract static class DocWriteResponseBuilder { + public abstract static class Builder { protected ShardId shardId = null; protected String type = null; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 987aa36585b7a..3023ecb1856a4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -19,7 +19,9 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -31,13 +33,12 @@ public class BulkItemRequest implements Streamable { private int id; private DocWriteRequest request; private volatile BulkItemResponse primaryResponse; - private volatile boolean ignoreOnReplica; BulkItemRequest() { } - public BulkItemRequest(int id, DocWriteRequest request) { + protected BulkItemRequest(int id, DocWriteRequest request) { this.id = id; this.request = request; } @@ -55,25 +56,16 @@ public String index() { return request.indices()[0]; } - BulkItemResponse getPrimaryResponse() { + // NOTE: protected for testing only + protected BulkItemResponse getPrimaryResponse() { return primaryResponse; } - void setPrimaryResponse(BulkItemResponse primaryResponse) { + // NOTE: protected for testing only + protected void setPrimaryResponse(BulkItemResponse primaryResponse) { this.primaryResponse = primaryResponse; } - /** - * Marks this request to be ignored and *not* execute on a replica. 
- */ - void setIgnoreOnReplica() { - this.ignoreOnReplica = true; - } - - boolean isIgnoreOnReplica() { - return ignoreOnReplica; - } - public static BulkItemRequest readBulkItem(StreamInput in) throws IOException { BulkItemRequest item = new BulkItemRequest(); item.readFrom(in); @@ -87,14 +79,37 @@ public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { primaryResponse = BulkItemResponse.readBulkItem(in); } - ignoreOnReplica = in.readBoolean(); + if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported + boolean ignoreOnReplica = in.readBoolean(); + if (ignoreOnReplica == false && primaryResponse != null) { + assert primaryResponse.isFailed() == false : "expected no failure on the primary response"; + } + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); - DocWriteRequest.writeDocumentRequest(out, request); + if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported + // old nodes expect updated version and version type on the request + if (primaryResponse != null) { + request.version(primaryResponse.getVersion()); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + DocWriteRequest.writeDocumentRequest(out, request); + } else { + DocWriteRequest.writeDocumentRequest(out, request); + } + } else { + DocWriteRequest.writeDocumentRequest(out, request); + } out.writeOptionalStreamable(primaryResponse); - out.writeBoolean(ignoreOnReplica); + if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported + if (primaryResponse != null) { + out.writeBoolean(primaryResponse.isFailed() + || primaryResponse.getResponse().getResult() == DocWriteResponse.Result.NOOP); + } else { + out.writeBoolean(false); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index f352c7ef47a73..31511e6b94f09 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -40,7 +40,6 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.util.function.Supplier; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; @@ -102,21 +101,21 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw final OpType opType = OpType.fromString(currentFieldName); ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); - DocWriteResponse.DocWriteResponseBuilder builder = null; + DocWriteResponse.Builder builder = null; CheckedConsumer itemParser = null; if (opType == OpType.INDEX || opType == OpType.CREATE) { - final IndexResponse.IndexResponseBuilder indexResponseBuilder = new IndexResponse.IndexResponseBuilder(); + final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder(); builder = indexResponseBuilder; itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder); } else if (opType == OpType.UPDATE) { - final UpdateResponse.UpdateResponseBuilder updateResponseBuilder = new UpdateResponse.UpdateResponseBuilder(); + final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder(); builder = updateResponseBuilder; itemParser = 
(updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder); } else if (opType == OpType.DELETE) { - final DeleteResponse.DeleteResponseBuilder deleteResponseBuilder = new DeleteResponse.DeleteResponseBuilder(); + final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder(); builder = deleteResponseBuilder; itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder); } else { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index e214f87ddb63b..8e0b48143dc92 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -23,17 +23,32 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; +import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** * A response of a bulk execution. Holding a response for each item responding (in order) of the * bulk requests. Each item holds the index/type/id is operated on, and if it failed or not (with the * failure message). 
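 * <p>
 * Rendered as JSON, a bulk response has roughly the following shape (abridged, values illustrative):
 * <pre>{@code
 * {
 *   "took": 30,
 *   "errors": false,
 *   "items": [
 *     { "index": { "_index": "test", "_type": "doc", "_id": "1", "status": 201 } }
 *   ]
 * }
 * }</pre>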
*/ -public class BulkResponse extends ActionResponse implements Iterable { +public class BulkResponse extends ActionResponse implements Iterable, StatusToXContentObject { + + private static final String ITEMS = "items"; + private static final String ERRORS = "errors"; + private static final String TOOK = "took"; + private static final String INGEST_TOOK = "ingest_took"; public static final long NO_INGEST_TOOK = -1L; @@ -141,4 +156,61 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(tookInMillis); out.writeZLong(ingestTookInMillis); } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOOK, tookInMillis); + if (ingestTookInMillis != BulkResponse.NO_INGEST_TOOK) { + builder.field(INGEST_TOOK, ingestTookInMillis); + } + builder.field(ERRORS, hasFailures()); + builder.startArray(ITEMS); + for (BulkItemResponse item : this) { + item.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + public static BulkResponse fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); + + long took = -1L; + long ingestTook = NO_INGEST_TOOK; + List items = new ArrayList<>(); + + String currentFieldName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (TOOK.equals(currentFieldName)) { + took = parser.longValue(); + } else if (INGEST_TOOK.equals(currentFieldName)) { + ingestTook = parser.longValue(); + } else if (ERRORS.equals(currentFieldName) == false) { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (ITEMS.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + items.add(BulkItemResponse.fromXContent(parser, items.size())); + } + } else { + throwUnknownField(currentFieldName, parser.getTokenLocation()); + } + } else { + throwUnknownToken(token, parser.getTokenLocation()); + } + } + return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook); + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index c270c51ea382d..8e2dde7db6370 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -36,7 +36,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest { public BulkShardRequest() { } - BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { + public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { super(shardId); this.items = items; setRefreshPolicy(refreshPolicy); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index b51ce624800a5..aa368c13fb80e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ 
b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -36,7 +36,8 @@ public class BulkShardResponse extends ReplicationResponse implements WriteRespo BulkShardResponse() { } - BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) { + // NOTE: public for testing only + public BulkShardResponse(ShardId shardId, BulkItemResponse[] responses) { this.shardId = shardId; this.responses = responses; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index fc580dd388028..efc01ab45f818 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -104,14 +104,10 @@ protected boolean resolveIndex() { public WritePrimaryResult shardOperationOnPrimary( BulkShardRequest request, IndexShard primary) throws Exception { final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); - - long[] preVersions = new long[request.items().length]; - VersionType[] preVersionTypes = new VersionType[request.items().length]; Translog.Location location = null; for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) { - location = executeBulkItemRequest(metaData, primary, request, preVersions, preVersionTypes, location, requestIndex); + location = executeBulkItemRequest(metaData, primary, request, location, requestIndex); } - BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; BulkItemRequest[] items = request.items(); for (int i = 0; i < items.length; i++) { @@ -124,110 +120,73 @@ public WritePrimaryResult shardOperationOnP /** Executes bulk item requests and handles request execution exceptions */ private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard primary, BulkShardRequest request, - long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex) throws Exception { final DocWriteRequest itemRequest = request.items()[requestIndex].request(); - preVersions[requestIndex] = itemRequest.version(); - preVersionTypes[requestIndex] = itemRequest.versionType(); - DocWriteRequest.OpType opType = itemRequest.opType(); - try { - // execute item request - final Engine.Result operationResult; - final DocWriteResponse response; - final BulkItemRequest replicaRequest; - switch (itemRequest.opType()) { - case CREATE: - case INDEX: - final IndexRequest indexRequest = (IndexRequest) itemRequest; - Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); - if (indexResult.hasFailure()) { - response = null; - } else { - // update the version on request so it will happen on the replicas - final long version = indexResult.getVersion(); - indexRequest.version(version); - indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); - indexRequest.setSeqNo(indexResult.getSeqNo()); - assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); - response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(), - indexResult.getVersion(), indexResult.isCreated()); - } - operationResult = indexResult; - replicaRequest = request.items()[requestIndex]; - break; - case UPDATE: - UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest), - primary, metaData, request, 
requestIndex); - operationResult = updateResultHolder.operationResult; - response = updateResultHolder.response; - replicaRequest = updateResultHolder.replicaRequest; - break; - case DELETE: - final DeleteRequest deleteRequest = (DeleteRequest) itemRequest; - Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary); - if (deleteResult.hasFailure()) { - response = null; - } else { - // update the request with the version so it will go to the replicas - deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery()); - deleteRequest.version(deleteResult.getVersion()); - deleteRequest.setSeqNo(deleteResult.getSeqNo()); - assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version()); - response = new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(), - deleteResult.getVersion(), deleteResult.isFound()); - } - operationResult = deleteResult; - replicaRequest = request.items()[requestIndex]; - break; - default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); - } + final DocWriteRequest.OpType opType = itemRequest.opType(); + final Engine.Result operationResult; + final DocWriteResponse response; + final BulkItemRequest replicaRequest; + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + final IndexRequest indexRequest = (IndexRequest) itemRequest; + Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); + response = indexResult.hasFailure() ? null : + new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(), + indexResult.getVersion(), indexResult.isCreated()); + operationResult = indexResult; + replicaRequest = request.items()[requestIndex]; + break; + case UPDATE: + UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest), + primary, metaData, request, requestIndex); + operationResult = updateResultHolder.operationResult; + response = updateResultHolder.response; + replicaRequest = updateResultHolder.replicaRequest; + break; + case DELETE: + final DeleteRequest deleteRequest = (DeleteRequest) itemRequest; + Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary); + response = deleteResult.hasFailure() ? 
null : + new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(), + deleteResult.getVersion(), deleteResult.isFound()); + operationResult = deleteResult; + replicaRequest = request.items()[requestIndex]; + break; + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); + } - // update the bulk item request because update request execution can mutate the bulk item request - request.items()[requestIndex] = replicaRequest; - if (operationResult == null) { // in case of noop update operation - assert response.getResult() == DocWriteResponse.Result.NOOP - : "only noop update can have null operation"; - replicaRequest.setIgnoreOnReplica(); - replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response)); - } else if (operationResult.hasFailure() == false) { - location = locationToSync(location, operationResult.getTranslogLocation()); - BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); - replicaRequest.setPrimaryResponse(primaryResponse); - // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. - primaryResponse.getResponse().setShardInfo(new ShardInfo()); + // update the bulk item request because update request execution can mutate the bulk item request + request.items()[requestIndex] = replicaRequest; + if (operationResult == null) { // in case of noop update operation + assert response.getResult() == DocWriteResponse.Result.NOOP + : "only noop update can have null operation"; + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response)); + } else if (operationResult.hasFailure() == false) { + location = locationToSync(location, operationResult.getTranslogLocation()); + BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); + replicaRequest.setPrimaryResponse(primaryResponse); + // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. 
+ primaryResponse.getResponse().setShardInfo(new ShardInfo()); + } else { + DocWriteRequest docWriteRequest = replicaRequest.request(); + Exception failure = operationResult.getFailure(); + if (isConflictException(failure)) { + logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } else { - DocWriteRequest docWriteRequest = replicaRequest.request(); - Exception failure = operationResult.getFailure(); - if (isConflictException(failure)) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); - } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); - } - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { - replicaRequest.setIgnoreOnReplica(); - replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); - } + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } - assert replicaRequest.getPrimaryResponse() != null; - assert preVersionTypes[requestIndex] != null; - } catch (Exception e) { - // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(e)) { - // restore updated versions... 
- for (int j = 0; j < requestIndex; j++) { - DocWriteRequest docWriteRequest = request.items()[j].request(); - docWriteRequest.version(preVersions[j]); - docWriteRequest.versionType(preVersionTypes[j]); - } + // if its a conflict failure, and we already executed the request on a primary (and we execute it + // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) + // then just use the response we got from the successful execution + if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); } - throw e; } + assert replicaRequest.getPrimaryResponse() != null; return location; } @@ -266,7 +225,7 @@ private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, Ind final UpdateHelper.Result translate; // translate update request try { - translate = updateHelper.prepare(updateRequest, primary, threadPool::estimatedTimeInMillis); + translate = updateHelper.prepare(updateRequest, primary, threadPool::absoluteTimeInMillis); } catch (Exception failure) { // we may fail translating a update to index or delete operation // we use index result to communicate failure while translating update request @@ -281,25 +240,10 @@ private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, Ind MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); indexRequest.process(mappingMd, request.index()); updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); - if (updateOperationResult.hasFailure() == false) { - // update the version on request so it will happen on the replicas - final long version = updateOperationResult.getVersion(); - indexRequest.version(version); - indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); - indexRequest.setSeqNo(updateOperationResult.getSeqNo()); - assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); - } break; case DELETED: DeleteRequest deleteRequest = translate.action(); updateOperationResult = executeDeleteRequestOnPrimary(deleteRequest, primary); - if (updateOperationResult.hasFailure() == false) { - // update the request with the version so it will go to the replicas - deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery()); - deleteRequest.version(updateOperationResult.getVersion()); - deleteRequest.setSeqNo(updateOperationResult.getSeqNo()); - assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version()); - } break; case NOOP: primary.noopUpdate(updateRequest.type()); @@ -348,10 +292,7 @@ private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, Ind replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest); break; } - assert (replicaRequest.request() instanceof IndexRequest - && ((IndexRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) || - (replicaRequest.request() instanceof DeleteRequest - && ((DeleteRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO); + assert updateOperationResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO; // successful operation break; // out of retry loop } else if (updateOperationResult.getFailure() 
instanceof VersionConflictEngineException == false) { @@ -367,20 +308,20 @@ public WriteReplicaResult shardOperationOnReplica(BulkShardReq Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; - if (item.isIgnoreOnReplica() == false) { + assert item.getPrimaryResponse() != null : "expected primary response to be set for item [" + i + "] request ["+ item.request() +"]"; + if (item.getPrimaryResponse().isFailed() == false && + item.getPrimaryResponse().getResponse().getResult() != DocWriteResponse.Result.NOOP) { DocWriteRequest docWriteRequest = item.request(); - // ensure request version is updated for replica operation during request execution in the primary - assert docWriteRequest.versionType() == docWriteRequest.versionType().versionTypeForReplicationAndRecovery() - : "unexpected version in replica " + docWriteRequest.version(); + DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse(); final Engine.Result operationResult; try { switch (docWriteRequest.opType()) { case CREATE: case INDEX: - operationResult = executeIndexRequestOnReplica((IndexRequest) docWriteRequest, replica); + operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica); break; case DELETE: - operationResult = executeDeleteRequestOnReplica((DeleteRequest) docWriteRequest, replica); + operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica); break; default: throw new IllegalStateException("Unexpected request operation type on replica: " @@ -426,17 +367,21 @@ private Translog.Location locationToSync(Translog.Location current, Translog.Loc * Execute the given {@link IndexRequest} on a replica shard, throwing a * {@link RetryOnReplicaException} if the operation needs to be re-tried. 
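 * <p>
 * Note that the version and sequence number applied on the replica are taken from the primary's
 * {@link DocWriteResponse} for the item, and the version type is translated through
 * {@code versionTypeForReplicationAndRecovery()}, rather than relying on values mutated on the request.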
*/ - public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) throws IOException { + public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException { final ShardId shardId = replica.shardId(); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source(), request.getContentType()).routing(request.routing()).parent(request.parent()); final Engine.Index operation; + final long version = primaryResponse.getVersion(); + final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery(); + assert versionType.validateVersionForWrites(version); + final long seqNo = primaryResponse.getSeqNo(); try { - operation = replica.prepareIndexOnReplica(sourceToParse, request.getSeqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry()); } catch (MapperParsingException e) { - return new Engine.IndexResult(e, request.version(), request.getSeqNo()); + return new Engine.IndexResult(e, version, seqNo); } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { @@ -446,7 +391,7 @@ public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest reque } /** Utility method to prepare an index operation on primary shards */ - static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { + private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(), request.getContentType()).routing(request.routing()).parent(request.parent()); @@ -460,7 +405,7 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque try { operation = prepareIndexOperationOnPrimary(request, primary); } catch (MapperParsingException | IllegalArgumentException e) { - return new Engine.IndexResult(e, request.version(), request.getSeqNo()); + return new Engine.IndexResult(e, request.version()); } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); final ShardId shardId = primary.shardId(); @@ -471,12 +416,12 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); } catch (IllegalArgumentException e) { // throws IAE on conflicts merging dynamic mappings - return new Engine.IndexResult(e, request.version(), request.getSeqNo()); + return new Engine.IndexResult(e, request.version()); } try { operation = prepareIndexOperationOnPrimary(request, primary); } catch (MapperParsingException | IllegalArgumentException e) { - return new Engine.IndexResult(e, request.version(), request.getSeqNo()); + return new Engine.IndexResult(e, request.version()); } update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { @@ -487,14 +432,17 @@ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest reque return primary.index(operation); } - public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException { + private static 
Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException { final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); return primary.delete(delete); } - public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) throws IOException { + private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request, IndexShard replica) throws IOException { + final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery(); + final long version = primaryResponse.getVersion(); + assert versionType.validateVersionForWrites(version); final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(), - request.getSeqNo(), request.primaryTerm(), request.version(), request.versionType()); + primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType); return replica.delete(delete); } } diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index 2c511b7447616..3680d09d39b2a 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -74,7 +74,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t public static DeleteResponse fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - DeleteResponseBuilder context = new DeleteResponseBuilder(); + Builder context = new Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { parseXContentFields(parser, context); } @@ -84,7 +84,7 @@ public static DeleteResponse fromXContent(XContentParser parser) throws IOExcept /** * Parse the current token and update the parsing context appropriately. */ - public static void parseXContentFields(XContentParser parser, DeleteResponseBuilder context) throws IOException { + public static void parseXContentFields(XContentParser parser, Builder context) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = parser.currentName(); @@ -97,7 +97,12 @@ public static void parseXContentFields(XContentParser parser, DeleteResponseBuil } } - public static class DeleteResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder { + /** + * Builder class for {@link DeleteResponse}. This builder is usually used during xcontent parsing to + * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to + * instantiate the {@link DeleteResponse}. 
+ */ + public static class Builder extends DocWriteResponse.Builder { private boolean found = false; diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 5f2c66f6b8aae..6310a2aac1868 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -76,7 +76,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t public static IndexResponse fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - IndexResponseBuilder context = new IndexResponseBuilder(); + Builder context = new Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { parseXContentFields(parser, context); } @@ -86,7 +86,7 @@ public static IndexResponse fromXContent(XContentParser parser) throws IOExcepti /** * Parse the current token and update the parsing context appropriately. */ - public static void parseXContentFields(XContentParser parser, IndexResponseBuilder context) throws IOException { + public static void parseXContentFields(XContentParser parser, Builder context) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = parser.currentName(); @@ -99,7 +99,12 @@ public static void parseXContentFields(XContentParser parser, IndexResponseBuild } } - public static class IndexResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder { + /** + * Builder class for {@link IndexResponse}. This builder is usually used during xcontent parsing to + * temporarily store the parsed values, then the {@link Builder#build()} method is called to + * instantiate the {@link IndexResponse}. 
+ */ + public static class Builder extends DocWriteResponse.Builder { private boolean created = false; diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 962c52fbdc2ed..bf95b7517c6ed 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -42,7 +42,6 @@ import java.util.List; import java.util.Map; -import java.util.StringJoiner; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; @@ -61,7 +60,7 @@ abstract class AbstractSearchAsyncAction exten **/ private final Function nodeIdToConnection; private final SearchTask task; - private final AtomicArray results; + private final SearchPhaseResults results; private final long clusterStateVersion; private final Map aliasFilter; private final Map concreteIndexBoosts; @@ -76,7 +75,7 @@ protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportS Map aliasFilter, Map concreteIndexBoosts, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, - long clusterStateVersion, SearchTask task) { + long clusterStateVersion, SearchTask task, SearchPhaseResults resultConsumer) { super(name, request, shardsIts, logger); this.startTime = startTime; this.logger = logger; @@ -87,9 +86,9 @@ protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportS this.listener = listener; this.nodeIdToConnection = nodeIdToConnection; this.clusterStateVersion = clusterStateVersion; - results = new AtomicArray<>(shardsIts.size()); this.concreteIndexBoosts = concreteIndexBoosts; this.aliasFilter = aliasFilter; + this.results = resultConsumer; } /** @@ -105,7 +104,7 @@ private long buildTookInMillis() { * This is the main entry point for a search. This method starts the search execution of the initial phase. */ public final void start() { - if (results.length() == 0) { + if (getNumShards() == 0) { //no search shards to search on, bail with empty response //(it happens with search across _all with no indices around and consistent with broadcast operations) listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(), @@ -130,8 +129,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha onPhaseFailure(currentPhase, "all shards failed", null); } else { if (logger.isTraceEnabled()) { - final String resultsFrom = results.asList().stream() - .map(r -> r.value.shardTarget().toString()).collect(Collectors.joining(",")); + final String resultsFrom = results.getSuccessfulResults() + .map(r -> r.shardTarget().toString()).collect(Collectors.joining(",")); logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})", currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion); } @@ -178,7 +177,7 @@ public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarg synchronized (shardFailuresMutex) { shardFailures = this.shardFailures.get(); // read again otherwise somebody else has created it? 
if (shardFailures == null) { // still null so we are the first and create a new instance - shardFailures = new AtomicArray<>(results.length()); + shardFailures = new AtomicArray<>(getNumShards()); this.shardFailures.set(shardFailures); } } @@ -194,7 +193,7 @@ public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarg } } - if (results.get(shardIndex) != null) { + if (results.hasResult(shardIndex)) { assert failure == null : "shard failed before but shouldn't: " + failure; successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter } @@ -207,22 +206,22 @@ public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarg * @param exception the exception explaining or causing the phase failure */ private void raisePhaseFailure(SearchPhaseExecutionException exception) { - for (AtomicArray.Entry entry : results.asList()) { + results.getSuccessfulResults().forEach((entry) -> { try { - Transport.Connection connection = nodeIdToConnection.apply(entry.value.shardTarget().getNodeId()); - sendReleaseSearchContext(entry.value.id(), connection); + Transport.Connection connection = nodeIdToConnection.apply(entry.shardTarget().getNodeId()); + sendReleaseSearchContext(entry.id(), connection); } catch (Exception inner) { inner.addSuppressed(exception); logger.trace("failed to release context", inner); } - } + }); listener.onFailure(exception); } @Override public final void onShardSuccess(int shardIndex, Result result) { successfulOps.incrementAndGet(); - results.set(shardIndex, result); + results.consumeResult(shardIndex, result); if (logger.isTraceEnabled()) { logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null); } @@ -242,7 +241,7 @@ public final void onPhaseDone() { @Override public final int getNumShards() { - return results.length(); + return results.getNumShards(); } @Override @@ -262,7 +261,7 @@ public final SearchRequest getRequest() { @Override public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { - return new SearchResponse(internalSearchResponse, scrollId, results.length(), successfulOps.get(), + return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(), buildTookInMillis(), buildShardFailures()); } @@ -310,6 +309,5 @@ public final ShardSearchTransportRequest buildShardSearchRequest(ShardIterator s * executed shard request * @param context the search context for the next phase */ - protected abstract SearchPhase getNextPhase(AtomicArray results, SearchPhaseContext context); - + protected abstract SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context); } diff --git a/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java index be0ee2c161e24..65f2d2d280ba1 100644 --- a/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -30,17 +29,13 @@ * where the given index is used to set the result on the array. 
*/ final class CountedCollector { - private final AtomicArray resultArray; + private final ResultConsumer resultConsumer; private final CountDown counter; private final Runnable onFinish; private final SearchPhaseContext context; - CountedCollector(AtomicArray resultArray, int expectedOps, Runnable onFinish, SearchPhaseContext context) { - if (expectedOps > resultArray.length()) { - throw new IllegalStateException("unexpected number of operations. got: " + expectedOps + " but array size is: " - + resultArray.length()); - } - this.resultArray = resultArray; + CountedCollector(ResultConsumer resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { + this.resultConsumer = resultConsumer; this.counter = new CountDown(expectedOps); this.onFinish = onFinish; this.context = context; @@ -63,7 +58,7 @@ void countDown() { void onResult(int index, R result, SearchShardTarget target) { try { result.shardTarget(target); - resultArray.set(index, result); + resultConsumer.consume(index, result); } finally { countDown(); } @@ -80,4 +75,12 @@ void onFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Ex countDown(); } } + + /** + * A functional interface to plug in shard result consumers to this collector + */ + @FunctionalInterface + public interface ResultConsumer { + void consume(int shardIndex, R result); + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 5447b9eee8f10..0ac3c69b8ebc7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -40,18 +40,19 @@ * @see CountedCollector#onFailure(int, SearchShardTarget, Exception) */ final class DfsQueryPhase extends SearchPhase { - private final AtomicArray queryResult; + private final InitialSearchPhase.SearchPhaseResults queryResult; private final SearchPhaseController searchPhaseController; private final AtomicArray dfsSearchResults; - private final Function, SearchPhase> nextPhaseFactory; + private final Function, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final SearchTransportService searchTransportService; DfsQueryPhase(AtomicArray dfsSearchResults, SearchPhaseController searchPhaseController, - Function, SearchPhase> nextPhaseFactory, SearchPhaseContext context) { + Function, SearchPhase> nextPhaseFactory, + SearchPhaseContext context) { super("dfs_query"); - this.queryResult = new AtomicArray<>(dfsSearchResults.length()); + this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards()); this.searchPhaseController = searchPhaseController; this.dfsSearchResults = dfsSearchResults; this.nextPhaseFactory = nextPhaseFactory; @@ -64,7 +65,8 @@ public void run() throws IOException { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsSearchResults); - final CountedCollector counter = new CountedCollector<>(queryResult, dfsSearchResults.asList().size(), + final CountedCollector counter = new CountedCollector<>(queryResult::consumeResult, + dfsSearchResults.asList().size(), () -> { context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)); }, context); diff --git a/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java 
b/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 1215e97ae3ab1..20d91770675f7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -49,29 +49,31 @@ final class FetchSearchPhase extends SearchPhase { private final Function nextPhaseFactory; private final SearchPhaseContext context; private final Logger logger; + private final InitialSearchPhase.SearchPhaseResults resultConsumer; - FetchSearchPhase(AtomicArray queryResults, + FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, SearchPhaseContext context) { - this(queryResults, searchPhaseController, context, + this(resultConsumer, searchPhaseController, context, (response) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits (finalResponse) -> sendResponsePhase(finalResponse, context))); } - FetchSearchPhase(AtomicArray queryResults, + FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, SearchPhaseContext context, Function nextPhaseFactory) { super("fetch"); - if (context.getNumShards() != queryResults.length()) { + if (context.getNumShards() != resultConsumer.getNumShards()) { throw new IllegalStateException("number of shards must match the length of the query results but doesn't:" - + context.getNumShards() + "!=" + queryResults.length()); + + context.getNumShards() + "!=" + resultConsumer.getNumShards()); } - this.fetchResults = new AtomicArray<>(queryResults.length()); + this.fetchResults = new AtomicArray<>(resultConsumer.getNumShards()); this.searchPhaseController = searchPhaseController; - this.queryResults = queryResults; + this.queryResults = resultConsumer.results; this.nextPhaseFactory = nextPhaseFactory; this.context = context; this.logger = context.getLogger(); + this.resultConsumer = resultConsumer; } @@ -99,7 +101,7 @@ private void innerRun() throws IOException { ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults); String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null; List> queryResultsAsList = queryResults.asList(); - final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResultsAsList); + final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); final boolean queryAndFetchOptimization = queryResults.length() == 1; final Runnable finishPhase = () -> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ? @@ -119,7 +121,7 @@ private void innerRun() throws IOException { final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ? 
searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards) : null; - final CountedCollector counter = new CountedCollector<>(fetchResults, + final CountedCollector counter = new CountedCollector<>(fetchResults::set, docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not finishPhase, context); for (int i = 0; i < docIdsToLoad.length; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index dac215801fcea..f21e9d228d69f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -28,12 +28,14 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.transport.ConnectTransportException; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Stream; /** * This is an abstract base class that encapsulates the logic to fan out to all shards in provided {@link GroupShardsIterator} @@ -213,4 +215,53 @@ private void onShardResult(int shardIndex, String nodeId, FirstResult result, Sh * @param listener the listener to notify on response */ protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener); + + /** + * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing + */ + static class SearchPhaseResults { + final AtomicArray results; + + SearchPhaseResults(int size) { + results = new AtomicArray<>(size); + } + + /** + * Returns the number of expected results this class should collect + */ + final int getNumShards() { + return results.length(); + } + + /** + * A stream of all non-null (successful) shard results + */ + final Stream getSuccessfulResults() { + return results.asList().stream().map(e -> e.value); + } + + /** + * Consumes a single shard result + * @param shardIndex the shards index, this is a 0-based id that is used to establish a 1 to 1 mapping to the searched shards + * @param result the shards result + */ + void consumeResult(int shardIndex, Result result) { + assert results.get(shardIndex) == null : "shardIndex: " + shardIndex + " is already set"; + results.set(shardIndex, result); + } + + /** + * Returns true iff a result if present for the given shard ID. 
+ */ + final boolean hasResult(int shardIndex) { + return results.get(shardIndex) != null; + } + + /** + * Reduces the collected results + */ + SearchPhaseController.ReducedQueryPhase reduce() { + throw new UnsupportedOperationException("reduce is not supported"); + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 2cf0c317d00a9..d846c42dbea5d 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.transport.Transport; @@ -43,7 +42,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, - request, listener, shardsIts, startTime, clusterStateVersion, task); + request, listener, shardsIts, startTime, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size())); this.searchPhaseController = searchPhaseController; } @@ -54,8 +53,8 @@ protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, Ac } @Override - protected SearchPhase getNextPhase(AtomicArray results, SearchPhaseContext context) { - return new DfsQueryPhase(results, searchPhaseController, + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new DfsQueryPhase(results.results, searchPhaseController, (queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index 6786e60fd616e..1a21eb3cc3468 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -114,4 +114,5 @@ default void sendReleaseSearchContext(long contextId, Transport.Connection conne * a response is returned to the user indicating that all shards have failed. 
*/ void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase); + } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5193fe7278417..52fbf952fe408 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -44,6 +44,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -70,14 +71,6 @@ public class SearchPhaseController extends AbstractComponent { - private static final Comparator> QUERY_RESULT_ORDERING = (o1, o2) -> { - int i = o1.value.shardTarget().getIndex().compareTo(o2.value.shardTarget().getIndex()); - if (i == 0) { - i = o1.value.shardTarget().getShardId().id() - o2.value.shardTarget().getShardId().id(); - } - return i; - }; - private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; private final BigArrays bigArrays; @@ -149,6 +142,9 @@ private static long optionalSum(long left, long right) { * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. * + * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate + * the result. In oder to obtain stable results the shard index (index of the result in the result array) must be the same. + * * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result. * Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase. * @param resultsArr Shard result holder @@ -159,26 +155,31 @@ public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray entry : results) { if (entry.value.queryResult().hasHits()) { - if (result != null) { // we already have one, can't really optimize + if (hasResult) { // we already have one, can't really optimize canOptimize = false; break; } canOptimize = true; - result = entry.value.queryResult(); + hasResult = true; + resultToOptimize = entry.value.queryResult(); shardIndex = entry.index; } } + result = canOptimize ? resultToOptimize : results.get(0).value.queryResult(); + assert result != null; } if (canOptimize) { int offset = result.from(); @@ -224,74 +225,62 @@ public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray[] sortedResults = results.toArray(new AtomicArray.Entry[results.size()]); - Arrays.sort(sortedResults, QUERY_RESULT_ORDERING); - QuerySearchResultProvider firstResult = sortedResults[0].value; - - int topN = firstResult.queryResult().size(); - int from = firstResult.queryResult().from(); - if (ignoreFrom) { - from = 0; - } + final int topN = result.queryResult().size(); + final int from = ignoreFrom ? 
0 : result.queryResult().from(); final TopDocs mergedTopDocs; - int numShards = resultsArr.length(); - if (firstResult.queryResult().topDocs() instanceof CollapseTopFieldDocs) { - CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) firstResult.queryResult().topDocs(); + final int numShards = resultsArr.length(); + if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) { + CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs(); final Sort sort = new Sort(firstTopDocs.fields); final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards]; - for (AtomicArray.Entry sortedResult : sortedResults) { + if (result.size() != shardTopDocs.length) { + // TopDocs#merge can't deal with null shard TopDocs + final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0], + sort.getSort(), new Object[0], Float.NaN); + Arrays.fill(shardTopDocs, empty); + } + for (AtomicArray.Entry sortedResult : results) { TopDocs topDocs = sortedResult.value.queryResult().topDocs(); // the 'index' field is the position in the resultsArr atomic array shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs; } - // TopDocs#merge can't deal with null shard TopDocs - for (int i = 0; i < shardTopDocs.length; ++i) { - if (shardTopDocs[i] == null) { - shardTopDocs[i] = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0], - sort.getSort(), new Object[0], Float.NaN); - } - } mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs); - } else if (firstResult.queryResult().topDocs() instanceof TopFieldDocs) { - TopFieldDocs firstTopDocs = (TopFieldDocs) firstResult.queryResult().topDocs(); + } else if (result.queryResult().topDocs() instanceof TopFieldDocs) { + TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs(); final Sort sort = new Sort(firstTopDocs.fields); final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()]; - for (AtomicArray.Entry sortedResult : sortedResults) { + if (result.size() != shardTopDocs.length) { + // TopDocs#merge can't deal with null shard TopDocs + final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN); + Arrays.fill(shardTopDocs, empty); + } + for (AtomicArray.Entry sortedResult : results) { TopDocs topDocs = sortedResult.value.queryResult().topDocs(); // the 'index' field is the position in the resultsArr atomic array shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs; } - // TopDocs#merge can't deal with null shard TopDocs - for (int i = 0; i < shardTopDocs.length; ++i) { - if (shardTopDocs[i] == null) { - shardTopDocs[i] = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN); - } - } mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs); } else { final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()]; - for (AtomicArray.Entry sortedResult : sortedResults) { + if (result.size() != shardTopDocs.length) { + // TopDocs#merge can't deal with null shard TopDocs + Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS); + } + for (AtomicArray.Entry sortedResult : results) { TopDocs topDocs = sortedResult.value.queryResult().topDocs(); // the 'index' field is the position in the resultsArr atomic array shardTopDocs[sortedResult.index] = topDocs; } - // TopDocs#merge can't deal with null shard TopDocs - for (int i = 0; i < shardTopDocs.length; ++i) { - if (shardTopDocs[i] == null) { - shardTopDocs[i] = Lucene.EMPTY_TOP_DOCS; - } - } mergedTopDocs = TopDocs.merge(from, 
topN, shardTopDocs); } ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs; final Map>> groupedCompletionSuggestions = new HashMap<>(); // group suggestions and assign shard index - for (AtomicArray.Entry sortedResult : sortedResults) { + for (AtomicArray.Entry sortedResult : results) { Suggest shardSuggest = sortedResult.value.queryResult().suggest(); if (shardSuggest != null) { for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) { @@ -461,23 +450,54 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr /** * Reduces the given query results and consumes all aggregations and profile results. + * @param queryResults a list of non-null query shard results + */ + public final ReducedQueryPhase reducedQueryPhase(List> queryResults) { + return reducedQueryPhase(queryResults, null, 0); + } + + /** + * Reduces the given query results and consumes all aggregations and profile results. + * @param queryResults a list of non-null query shard results + * @param bufferdAggs a list of pre-collected / buffered aggregations. if this list is non-null all aggregations have been consumed + * from all non-null query results. + * @param numReducePhases the number of non-final reduce phases applied to the query results. * @see QuerySearchResult#consumeAggs() * @see QuerySearchResult#consumeProfileResult() */ - public final ReducedQueryPhase reducedQueryPhase(List> queryResults) { + private ReducedQueryPhase reducedQueryPhase(List> queryResults, + List bufferdAggs, int numReducePhases) { + assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; + numReducePhases++; // increment for this phase long totalHits = 0; long fetchHits = 0; float maxScore = Float.NEGATIVE_INFINITY; boolean timedOut = false; Boolean terminatedEarly = null; - if (queryResults.isEmpty()) { - return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null); + if (queryResults.isEmpty()) { // early terminate we have nothing to reduce + return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null, + numReducePhases); } - QuerySearchResult firstResult = queryResults.get(0).value.queryResult(); + final QuerySearchResult firstResult = queryResults.get(0).value.queryResult(); final boolean hasSuggest = firstResult.suggest() != null; - final boolean hasAggs = firstResult.hasAggs(); final boolean hasProfileResults = firstResult.hasProfileResults(); - final List aggregationsList = hasAggs ? new ArrayList<>(queryResults.size()) : Collections.emptyList(); + final boolean consumeAggs; + final List aggregationsList; + if (bufferdAggs != null) { + consumeAggs = false; + // we already have results from intermediate reduces and just need to perform the final reduce + assert firstResult.hasAggs() : "firstResult has no aggs but we got non null buffered aggs?"; + aggregationsList = bufferdAggs; + } else if (firstResult.hasAggs()) { + // the number of shards was less than the buffer size so we reduce agg results directly + aggregationsList = new ArrayList<>(queryResults.size()); + consumeAggs = true; + } else { + // no aggregations + aggregationsList = Collections.emptyList(); + consumeAggs = false; + } + // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) final Map> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map profileResults = hasProfileResults ? 
new HashMap<>(queryResults.size()) @@ -506,7 +526,7 @@ public final ReducedQueryPhase reducedQueryPhase(List aggregationsList) { + ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService, false); + return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList, + null, reduceContext); } private InternalAggregations reduceAggs(List aggregationsList, - List pipelineAggregators) { - ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService); + List pipelineAggregators, ReduceContext reduceContext) { InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); if (pipelineAggregators != null) { List newAggs = StreamSupport.stream(aggregations.spliterator(), false) @@ -558,10 +589,15 @@ public static final class ReducedQueryPhase { final InternalAggregations aggregations; // the reduced profile results final SearchProfileShardResults shardResults; + // the number of reduces phases + final int numReducePhases; ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly, QuerySearchResult oneResult, Suggest suggest, InternalAggregations aggregations, - SearchProfileShardResults shardResults) { + SearchProfileShardResults shardResults, int numReducePhases) { + if (numReducePhases <= 0) { + throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases); + } this.totalHits = totalHits; this.fetchHits = fetchHits; if (Float.isInfinite(maxScore)) { @@ -575,6 +611,7 @@ public static final class ReducedQueryPhase { this.suggest = suggest; this.aggregations = aggregations; this.shardResults = shardResults; + this.numReducePhases = numReducePhases; } /** @@ -582,7 +619,7 @@ public static final class ReducedQueryPhase { * @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, AtomicArray) */ public InternalSearchResponse buildResponse(SearchHits hits) { - return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly); + return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases); } /** @@ -593,4 +630,95 @@ public boolean isEmpty() { } } + /** + * A {@link org.elasticsearch.action.search.InitialSearchPhase.SearchPhaseResults} implementation + * that incrementally reduces aggregation results as shard results are consumed. + * This implementation can be configured to batch up a certain amount of results and only reduce them + * iff the buffer is exhausted. + */ + static final class QueryPhaseResultConsumer + extends InitialSearchPhase.SearchPhaseResults { + private final InternalAggregations[] buffer; + private int index; + private final SearchPhaseController controller; + private int numReducePhases = 0; + + /** + * Creates a new {@link QueryPhaseResultConsumer} + * @param controller a controller instance to reduce the query response objects + * @param expectedResultSize the expected number of query results. Corresponds to the number of shards queried + * @param bufferSize the size of the reduce buffer. if the buffer size is smaller than the number of expected results + * the buffer is used to incrementally reduce aggregation results before all shards responded. 
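+ * For example, with ten expected results and a buffer size of five, consuming the sixth aggregation first
+ * collapses the five buffered trees into a single partially reduced entry kept in slot 0 of the buffer, so the
+ * coordinating node never holds more than five aggregation trees at once even though ten shards respond.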
+ */ + private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize) { + super(expectedResultSize); + if (expectedResultSize != 1 && bufferSize < 2) { + throw new IllegalArgumentException("buffer size must be >= 2 if there is more than one expected result"); + } + if (expectedResultSize <= bufferSize) { + throw new IllegalArgumentException("buffer size must be less than the expected result size"); + } + this.controller = controller; + // no need to buffer anything if we have less expected results. in this case we don't consume any results ahead of time. + this.buffer = new InternalAggregations[bufferSize]; + } + + @Override + public void consumeResult(int shardIndex, QuerySearchResultProvider result) { + super.consumeResult(shardIndex, result); + QuerySearchResult queryResult = result.queryResult(); + assert queryResult.hasAggs() : "this collector should only be used if aggs are requested"; + consumeInternal(queryResult); + } + + private synchronized void consumeInternal(QuerySearchResult querySearchResult) { + InternalAggregations aggregations = (InternalAggregations) querySearchResult.consumeAggs(); + if (index == buffer.length) { + InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(buffer)); + Arrays.fill(buffer, null); + numReducePhases++; + buffer[0] = reducedAggs; + index = 1; + } + final int i = index++; + buffer[i] = aggregations; + } + + private synchronized List getRemaining() { + return Arrays.asList(buffer).subList(0, index); + } + + @Override + public ReducedQueryPhase reduce() { + return controller.reducedQueryPhase(results.asList(), getRemaining(), numReducePhases); + } + + /** + * Returns the number of buffered results + */ + int getNumBuffered() { + return index; + } + + int getNumReducePhases() { return numReducePhases; } + } + + /** + * Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally. 
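+ * <p>A hedged usage sketch; {@code controller} and the literal shard count are illustrative placeholders:
+ * <pre>{@code
+ * SearchRequest request = new SearchRequest("index")
+ *     .source(new SearchSourceBuilder().aggregation(AggregationBuilders.terms("tags").field("tag")));
+ * request.setBatchedReduceSize(5);
+ * // aggregations are requested and 5 < 20, so the incrementally reducing consumer is returned
+ * InitialSearchPhase.SearchPhaseResults<QuerySearchResultProvider> results = controller.newSearchPhaseResults(request, 20);
+ * }</pre>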
+ */ + InitialSearchPhase.SearchPhaseResults newSearchPhaseResults(SearchRequest request, int numShards) { + SearchSourceBuilder source = request.source(); + if (source != null && source.aggregations() != null) { + if (request.getBatchedReduceSize() < numShards) { + // only use this if there are aggs and if there are more shards than we should reduce at once + return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize()); + } + } + return new InitialSearchPhase.SearchPhaseResults(numShards) { + @Override + public ReducedQueryPhase reduce() { + return reducedQueryPhase(results.asList()); + } + }; + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 8d4edfeb79f79..210a9aefda755 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.transport.Transport; @@ -44,17 +43,19 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction results, SearchPhaseContext context) { + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { return new FetchSearchPhase(results, searchPhaseController, context); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9c69f1a763f38..9e35cca05b94f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -70,6 +70,8 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest private Scroll scroll; + private int batchedReduceSize = 512; + private String[] types = Strings.EMPTY_ARRAY; public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed(); @@ -274,6 +276,25 @@ public Boolean requestCache() { return this.requestCache; } + /** + * Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection + * mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + */ + public void setBatchedReduceSize(int batchedReduceSize) { + if (batchedReduceSize <= 1) { + throw new IllegalArgumentException("batchedReduceSize must be >= 2"); + } + this.batchedReduceSize = batchedReduceSize; + } + + /** + * Returns the number of shard results that should be reduced at once on the coordinating node. This value should be used as a + * protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. 
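+ * The value defaults to {@code 512}; {@link #setBatchedReduceSize(int)} rejects values below {@code 2}.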
+ */ + public int getBatchedReduceSize() { + return batchedReduceSize; + } + /** * @return true if the request only has suggest */ @@ -320,6 +341,7 @@ public void readFrom(StreamInput in) throws IOException { types = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); + batchedReduceSize = in.readVInt(); } @Override @@ -337,6 +359,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(types); indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); + out.writeVInt(batchedReduceSize); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 865cf01430fb0..ffe2c1b20c516 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -523,4 +523,13 @@ private SearchSourceBuilder sourceBuilder() { } return request.source(); } + + /** + * Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection + * mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + */ + public SearchRequestBuilder setBatchedReduceSize(int batchedReduceSize) { + this.request.setBatchedReduceSize(batchedReduceSize); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 4b8ba5e64b695..54d8eab99e727 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -61,7 +61,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb public SearchResponse() { } - public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards, long tookInMillis, ShardSearchFailure[] shardFailures) { + public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards, + long tookInMillis, ShardSearchFailure[] shardFailures) { this.internalResponse = internalResponse; this.scrollId = scrollId; this.totalShards = totalShards; @@ -106,6 +107,13 @@ public Boolean isTerminatedEarly() { return internalResponse.terminatedEarly(); } + /** + * Returns the number of reduce phases applied to obtain this search response + */ + public int getNumReducePhases() { + return internalResponse.getNumReducePhases(); + } + /** * How long the search took. 
*/ @@ -172,13 +180,6 @@ public void scrollId(String scrollId) { return internalResponse.profile(); } - static final class Fields { - static final String _SCROLL_ID = "_scroll_id"; - static final String TOOK = "took"; - static final String TIMED_OUT = "timed_out"; - static final String TERMINATED_EARLY = "terminated_early"; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -189,14 +190,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { if (scrollId != null) { - builder.field(Fields._SCROLL_ID, scrollId); + builder.field("_scroll_id", scrollId); } - builder.field(Fields.TOOK, tookInMillis); - builder.field(Fields.TIMED_OUT, isTimedOut()); + builder.field("took", tookInMillis); + builder.field("timed_out", isTimedOut()); if (isTerminatedEarly() != null) { - builder.field(Fields.TERMINATED_EARLY, isTerminatedEarly()); + builder.field("terminated_early", isTerminatedEarly()); + } + if (getNumReducePhases() != 1) { + builder.field("num_reduce_phases", getNumReducePhases()); } - RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(), getShardFailures()); + RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(), + getShardFailures()); internalResponse.toXContent(builder, params); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java index 107c791a069eb..fa02dac9e1e2d 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java @@ -19,14 +19,12 @@ package org.elasticsearch.action.support.replication; -import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -38,8 +36,6 @@ public abstract class ReplicatedWriteRequest> extends ReplicationRequest implements WriteRequest { private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; - private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - /** * Constructor for deserialization. */ @@ -66,32 +62,11 @@ public RefreshPolicy getRefreshPolicy() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); refreshPolicy = RefreshPolicy.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - seqNo = in.readZLong(); - } else { - seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; - } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); refreshPolicy.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { - out.writeZLong(seqNo); - } - } - - /** - * Returns the sequence number for this operation. 
The sequence number is assigned while the operation - * is performed on the primary shard. - */ - public long getSeqNo() { - return seqNo; - } - - /** sets the sequence number for this operation. should only be called on the primary shard */ - public void setSeqNo(long seqNo) { - this.seqNo = seqNo; } } diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 0235dd95a4b18..67d62113062a7 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -171,7 +171,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final ShardId shardId = request.getShardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexShard indexShard = indexService.getShard(shardId.getId()); - final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::estimatedTimeInMillis); + final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis); switch (result.getResponseResult()) { case CREATED: IndexRequest upsertRequest = result.action(); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index d34865b687258..2f153cdbef749 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -30,8 +30,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -49,7 +52,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; public class UpdateRequest extends InstanceShardOperationRequest - implements DocWriteRequest, WriteRequest { + implements DocWriteRequest, WriteRequest, ToXContentObject { private String type; private String id; @@ -846,4 +849,42 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(scriptedUpsert); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (docAsUpsert) { + builder.field("doc_as_upsert", docAsUpsert); + } + if (doc != null) { + XContentType xContentType = doc.getContentType(); + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, doc.source(), xContentType)) { + builder.field("doc"); + builder.copyCurrentStructure(parser); + } + } + if (script != null) { + builder.field("script", script); + } + if (upsertRequest != null) { + XContentType xContentType = upsertRequest.getContentType(); + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, upsertRequest.source(), xContentType)) { + builder.field("upsert"); + builder.copyCurrentStructure(parser); + } + 
} + if (scriptedUpsert) { + builder.field("scripted_upsert", scriptedUpsert); + } + if (detectNoop == false) { + builder.field("detect_noop", detectNoop); + } + if (fields != null) { + builder.array("fields", fields); + } + if (fetchSourceContext != null) { + builder.field("_source", fetchSourceContext); + } + builder.endObject(); + return builder; + } } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index 2e861baa8c946..6f736a024eb39 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -114,7 +114,7 @@ public String toString() { public static UpdateResponse fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - UpdateResponseBuilder context = new UpdateResponseBuilder(); + Builder context = new Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { parseXContentFields(parser, context); } @@ -124,7 +124,7 @@ public static UpdateResponse fromXContent(XContentParser parser) throws IOExcept /** * Parse the current token and update the parsing context appropriately. */ - public static void parseXContentFields(XContentParser parser, UpdateResponseBuilder context) throws IOException { + public static void parseXContentFields(XContentParser parser, Builder context) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = parser.currentName(); @@ -137,7 +137,12 @@ public static void parseXContentFields(XContentParser parser, UpdateResponseBuil } } - public static class UpdateResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder { + /** + * Builder class for {@link UpdateResponse}. This builder is usually used during xcontent parsing to + * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to + * instantiate the {@link UpdateResponse}. 
+ */ + public static class Builder extends DocWriteResponse.Builder { private GetResult getResult = null; diff --git a/core/src/main/java/org/elasticsearch/common/Strings.java b/core/src/main/java/org/elasticsearch/common/Strings.java index 2f55ab46e7e7b..7cce81674d711 100644 --- a/core/src/main/java/org/elasticsearch/common/Strings.java +++ b/core/src/main/java/org/elasticsearch/common/Strings.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -712,10 +711,12 @@ public static Set commaDelimitedListToSet(String str) { * @return the delimited String */ public static String collectionToDelimitedString(Iterable coll, String delim, String prefix, String suffix) { - return collectionToDelimitedString(coll, delim, prefix, suffix, new StringBuilder()); + StringBuilder sb = new StringBuilder(); + collectionToDelimitedString(coll, delim, prefix, suffix, sb); + return sb.toString(); } - public static String collectionToDelimitedString(Iterable coll, String delim, String prefix, String suffix, StringBuilder sb) { + public static void collectionToDelimitedString(Iterable coll, String delim, String prefix, String suffix, StringBuilder sb) { Iterator it = coll.iterator(); while (it.hasNext()) { sb.append(prefix).append(it.next()).append(suffix); @@ -723,7 +724,6 @@ public static String collectionToDelimitedString(Iterable coll, String delim, sb.append(delim); } } - return sb.toString(); } /** @@ -758,12 +758,14 @@ public static String collectionToCommaDelimitedString(Iterable coll) { * @return the delimited String */ public static String arrayToDelimitedString(Object[] arr, String delim) { - return arrayToDelimitedString(arr, delim, new StringBuilder()); + StringBuilder sb = new StringBuilder(); + arrayToDelimitedString(arr, delim, sb); + return sb.toString(); } - public static String arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) { + public static void arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) { if (isEmpty(arr)) { - return ""; + return; } for (int i = 0; i < arr.length; i++) { if (i > 0) { @@ -771,7 +773,6 @@ public static String arrayToDelimitedString(Object[] arr, String delim, StringBu } sb.append(arr[i]); } - return sb.toString(); } /** diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index 9092e13eb1b40..0e790c0dc8b76 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -63,7 +63,7 @@ public BlobPath add(String path) { public String buildAsString() { String p = String.join(SEPARATOR, paths); - if (p.isEmpty()) { + if (p.isEmpty() || p.endsWith(SEPARATOR)) { return p; } return p + SEPARATOR; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index c65542093d353..6f56a547d3fba 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -39,7 +39,6 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; -import 
java.util.function.Supplier;
 
 public class MembershipAction extends AbstractComponent {
 
@@ -63,8 +62,7 @@ public interface MembershipListener {
 
     private final MembershipListener listener;
 
-    public MembershipAction(Settings settings, TransportService transportService,
-                            Supplier localNodeSupplier, MembershipListener listener) {
+    public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener) {
         super(settings);
         this.transportService = transportService;
         this.listener = listener;
@@ -73,7 +71,7 @@ public MembershipAction(Settings settings, TransportService transportService,
         transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new, ThreadPool.Names.GENERIC,
             new JoinRequestRequestHandler());
         transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
-            () -> new ValidateJoinRequest(localNodeSupplier), ThreadPool.Names.GENERIC,
+            () -> new ValidateJoinRequest(), ThreadPool.Names.GENERIC,
             new ValidateJoinRequestRequestHandler());
         transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC,
             new LeaveRequestRequestHandler());
@@ -155,22 +153,18 @@ public void onFailure(Exception e) {
     }
 
     static class ValidateJoinRequest extends TransportRequest {
-        private final Supplier localNode;
         private ClusterState state;
 
-        ValidateJoinRequest(Supplier localNode) {
-            this.localNode = localNode;
-        }
+        ValidateJoinRequest() {}
 
         ValidateJoinRequest(ClusterState state) {
             this.state = state;
-            this.localNode = state.nodes()::getLocalNode;
         }
 
         @Override
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
-            this.state = ClusterState.readFrom(in, localNode.get());
+            this.state = ClusterState.readFrom(in, null);
         }
 
         @Override
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index 94c46ed867094..be6f52fc22c0c 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -191,7 +191,7 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t
                 new NewPendingClusterStateListener(),
                 discoverySettings,
                 clusterService.getClusterName());
-        this.membership = new MembershipAction(settings, transportService, this::localNode, new MembershipListener());
+        this.membership = new MembershipAction(settings, transportService, new MembershipListener());
         this.joinThreadControl = new JoinThreadControl();
 
         transportService.registerRequestHandler(
diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java
index 36b8ab6574cbb..f6b452502a5cc 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -391,6 +391,14 @@ public IndexResult(long version, long seqNo, boolean created) {
             this.created = created;
         }
 
+        /**
+         * use in case an index operation failed before it got to the internal engine
+         * (e.g. while preparing the operation or updating mappings)
+         */
+        public IndexResult(Exception failure, long version) {
+            this(failure, version, SequenceNumbersService.UNASSIGNED_SEQ_NO);
+        }
+
         public IndexResult(Exception failure, long version, long seqNo) {
             super(Operation.TYPE.INDEX, failure, version, seqNo);
             this.created = false;
diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
index 4fe947660c142..60dddc4d40db1 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
@@ -188,7 +188,7 @@ public Codec getCodec() {
 
     /**
      * Returns a thread-pool mainly used to get estimated time stamps from
-     * {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule
+     * {@link org.elasticsearch.threadpool.ThreadPool#relativeTimeInMillis()} and to schedule
      * async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool
      */
     public ThreadPool getThreadPool() {
diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index bcfee5026ce9a..0fa6855ce0867 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -147,7 +147,7 @@ public InternalEngine(EngineConfig engineConfig) throws EngineException {
         EngineMergeScheduler scheduler = null;
         boolean success = false;
         try {
-            this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
+            this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
             mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
             throttle = new IndexThrottle();
@@ -446,7 +446,7 @@ private boolean checkVersionConflict(final Operation op, final long currentVersi
 
     private long checkDeletedAndGCed(VersionValue versionValue) {
         long currentVersion;
-        if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
+        if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
             currentVersion = Versions.NOT_FOUND; // deleted, and GC
         } else {
             currentVersion = versionValue.version();
@@ -478,6 +478,20 @@ private boolean canOptimizeAddDocument(Index index) {
         return false;
     }
 
+    private boolean assertVersionType(final Engine.Operation operation) {
+        if (operation.origin() == Operation.Origin.REPLICA ||
+            operation.origin() == Operation.Origin.PEER_RECOVERY ||
+            operation.origin() == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
+            // ensure that replica operation has expected version type for replication
+            // ensure that versionTypeForReplicationAndRecovery is idempotent
+            assert operation.versionType() == operation.versionType().versionTypeForReplicationAndRecovery()
+                : "unexpected version type in request from [" + operation.origin().name() + "] " +
+                    "found [" + operation.versionType().name() + "] " +
+                    "expected [" + operation.versionType().versionTypeForReplicationAndRecovery().name() + "]";
+        }
+        return true;
+    }
+
     private boolean assertSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
         if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
             // legacy support
@@ -499,6 +513,7 @@ public IndexResult index(Index index) throws IOException {
         try (ReleasableLock releasableLock = readLock.acquire()) {
             ensureOpen();
             assert assertSequenceNumber(index.origin(), index.seqNo());
+            assert assertVersionType(index);
             final Translog.Location location;
             long seqNo = index.seqNo();
             try (Releasable ignored = acquireLock(index.uid());
@@ -692,6 +707,7 @@ private static void update(final Term uid, final List doc
     public DeleteResult delete(Delete delete) throws IOException {
         DeleteResult result;
         try (ReleasableLock ignored = readLock.acquire()) {
+            assert assertVersionType(delete);
             ensureOpen();
             // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
             result = innerDelete(delete);
@@ -710,7 +726,7 @@ public DeleteResult delete(Delete delete) throws IOException {
 
     private void maybePruneDeletedTombstones() {
         // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it
         // every 1/4 of gcDeletesInMillis:
-        if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
+        if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
             pruneDeletedTombstones();
         }
     }
@@ -756,7 +772,7 @@ private DeleteResult innerDelete(Delete delete) throws IOException {
             deleteResult = new DeleteResult(updatedVersion, seqNo, found);
 
             versionMap.putUnderLock(delete.uid().bytes(),
-                new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
+                new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().relativeTimeInMillis()));
         }
         if (!deleteResult.hasFailure()) {
             location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
@@ -1031,7 +1047,7 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti
     }
 
     private void pruneDeletedTombstones() {
-        long timeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
+        long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
 
         // TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock...
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java
index 6cac16d2fcee4..8e18c820b7965 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java
@@ -23,7 +23,7 @@
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.MultiTermQuery;
@@ -114,12 +114,12 @@ public boolean isSearchable() {
 
         @Override
         public Query termQuery(Object value, @Nullable QueryShardContext context) {
             final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
-            return new TermsQuery(UidFieldMapper.NAME, uids);
+            return new TermInSetQuery(UidFieldMapper.NAME, uids);
         }
 
         @Override
         public Query termsQuery(List values, @Nullable QueryShardContext context) {
-            return new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
+            return new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values));
         }
     }
 
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java
index e8a11fc5d478a..a7d59fcfb4285 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/StringFieldType.java
@@ -22,7 +22,7 @@
 import java.util.List;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.PrefixQuery;
@@ -53,7 +53,7 @@ public Query termsQuery(List values, QueryShardContext context) {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return new TermsQuery(name(), bytesRefs);
+        return new TermInSetQuery(name(), bytesRefs);
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
index e1fd56616f3e1..89b09cc068e63 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
@@ -22,9 +22,9 @@
 import java.util.List;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
@@ -66,7 +66,7 @@ public Query termsQuery(List values, QueryShardContext context) {
         for (int i = 0; i < bytesRefs.length; i++) {
             bytesRefs[i] = indexedValueForSearch(values.get(i));
         }
-        return new TermsQuery(name(), bytesRefs);
+        return new TermInSetQuery(name(), bytesRefs);
     }
 }
 
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java
index 5f5be04a91455..c24747e62c8cc 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java
@@ -26,13 +26,13 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermContext;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lucene.Lucene;
@@ -172,7 +172,7 @@ public void checkCompatibility(MappedFieldType other,
      * Specialization for a disjunction over many _type
      */
     public static class TypesQuery extends Query {
-        // Same threshold as TermsQuery
+        // Same threshold as TermInSetQuery
        private static final int BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD = 16;
 
        private final BytesRef[] types;
@@ -220,7 +220,7 @@ public Query rewrite(IndexReader reader) throws IOException {
                 }
                 return new ConstantScoreQuery(bq.build());
             }
-            return new TermsQuery(CONTENT_TYPE, types);
+            return new TermInSetQuery(CONTENT_TYPE, types);
         }
 
         @Override
diff --git a/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
index 630cf2d93b9e0..5857ef9abf373 100644
--- a/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
@@ -19,8 +19,8 @@
 
 package org.elasticsearch.index.query;
 
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
@@ -175,7 +175,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
                 Collections.addAll(typesForQuery, types);
             }
 
-            query = new TermsQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(typesForQuery, ids));
+            query = new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(typesForQuery, ids));
         }
         return query;
     }
diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
index 9f083eaab096e..5df7ace69bb39 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
@@ -21,10 +21,10 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.Fields;
-import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ExceptionsHelper;
@@ -1165,7 +1165,7 @@ private static void handleExclude(BooleanQuery.Builder boolQuery, Item[] likeIte
             uids.add(createUidAsBytes(item.type(), item.id()));
         }
         if (!uids.isEmpty()) {
-            TermsQuery query = new TermsQuery(UidFieldMapper.NAME, uids.toArray(new BytesRef[uids.size()]));
+            TermInSetQuery query = new TermInSetQuery(UidFieldMapper.NAME, uids.toArray(new BytesRef[uids.size()]));
             boolQuery.add(query, BooleanClause.Occur.MUST_NOT);
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
index 198537f006033..620004e206073 100644
--- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.query;
 
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -253,7 +253,7 @@ public int size() {
                 if (o instanceof BytesRef) {
                     b = (BytesRef) o;
                 } else {
-                    builder.copyChars(o.toString());
+                    builder.copyChars(o.toString());
                     b = builder.get();
                 }
                 bytesOut.writeBytes(b.bytes, b.offset, b.length);
@@ -410,7 +410,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
         for (int i = 0; i < filterValues.length; i++) {
             filterValues[i] = BytesRefs.toBytesRef(values.get(i));
         }
-        return new TermsQuery(fieldName, filterValues);
+        return new TermInSetQuery(fieldName, filterValues);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
index 33cb70a0d0ba9..7bb1e51cd2372 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java
@@ -137,7 +137,7 @@ private double addDouble(double current, double other) {
         }
 
         public void add(Path path) {
-            total = addLong(total, path.total);
+            total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total));
             free = addLong(free, path.free);
             available = addLong(available, path.available);
             if (path.spins != null && path.spins.booleanValue()) {
diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
index d079a7201686f..1fdae49a6f16b 100644
--- a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
+++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java
@@ -136,7 +136,11 @@ List readProcDiskStats() throws IOException {
     }
 
     /* See: https://bugs.openjdk.java.net/browse/JDK-8162520 */
-    private static long adjustForHugeFilesystems(long bytes) {
+    /**
+     * Take a large value intended to be positive, and if it has overflowed,
+     * return {@code Long.MAX_VALUE} instead of a negative number.
+     */
+    static long adjustForHugeFilesystems(long bytes) {
         if (bytes < 0) {
             return Long.MAX_VALUE;
         }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
index 47037460e2b6e..9a168e84dd683 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
@@ -24,30 +24,20 @@
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.AcknowledgedRestListener;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import static java.util.Collections.unmodifiableSet;
 import static org.elasticsearch.client.Requests.updateSettingsRequest;
-import static org.elasticsearch.common.util.set.Sets.newHashSet;
 
 public class RestUpdateSettingsAction extends BaseRestHandler {
-    private static final Set VALUES_TO_EXCLUDE = unmodifiableSet(newHashSet(
-        "error_trace",
-        "pretty",
-        "timeout",
-        "master_timeout",
-        "index",
-        "preserve_existing",
-        "expand_wildcards",
-        "ignore_unavailable",
-        "allow_no_indices"));
 
     public RestUpdateSettingsAction(Settings settings, RestController controller) {
         super(settings);
@@ -63,29 +53,22 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout()));
         updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions()));
 
-        Settings.Builder updateSettings = Settings.builder();
-        String bodySettingsStr = request.content().utf8ToString();
-        if (Strings.hasText(bodySettingsStr)) {
-            Settings buildSettings = Settings.builder()
-                .loadFromSource(bodySettingsStr, request.getXContentType())
-                .build();
-            for (Map.Entry entry : buildSettings.getAsMap().entrySet()) {
-                String key = entry.getKey();
-                String value = entry.getValue();
+        Map settings = new HashMap<>();
+        if (request.hasContent()) {
+            try (XContentParser parser = request.contentParser()) {
+                Map bodySettings = parser.map();
+                Object innerBodySettings = bodySettings.get("settings"); // clean up in case the body is wrapped with "settings" : { ... }
-                if (key.startsWith("settings.")) {
-                    key = key.substring("settings.".length());
+                if (innerBodySettings instanceof Map) {
+                    @SuppressWarnings("unchecked")
+                    Map innerBodySettingsMap = (Map) innerBodySettings;
+                    settings.putAll(innerBodySettingsMap);
+                } else {
+                    settings.putAll(bodySettings);
                 }
-                updateSettings.put(key, value);
             }
         }
-        for (Map.Entry entry : request.params().entrySet()) {
-            if (VALUES_TO_EXCLUDE.contains(entry.getKey())) {
-                continue;
-            }
-            updateSettings.put(entry.getKey(), entry.getValue());
-        }
-        updateSettingsRequest.settings(updateSettings);
+        updateSettingsRequest.settings(settings);
 
         return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel));
     }
@@ -94,5 +77,4 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
     protected Set responseParams() {
         return Settings.FORMAT_PARAMS;
     }
-
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 715d90b30c82b..d6af84d947299 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -19,9 +19,7 @@
 
 package org.elasticsearch.rest.action.document;
 
-import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
-import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.Requests;
@@ -30,20 +28,16 @@
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.RestResponse;
-import org.elasticsearch.rest.action.RestBuilderListener;
+import org.elasticsearch.rest.action.RestStatusToXContentListener;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
 import java.io.IOException;
 
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.rest.RestRequest.Method.PUT;
-import static org.elasticsearch.rest.RestStatus.OK;
 
 /**
  *

@@ -95,36 +89,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields,
             defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, request.getXContentType());
 
-        return channel -> client.bulk(bulkRequest, new RestBuilderListener(channel) {
-            @Override
-            public RestResponse buildResponse(BulkResponse response, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                builder.field(Fields.TOOK, response.getTookInMillis());
-                if (response.getIngestTookInMillis() != BulkResponse.NO_INGEST_TOOK) {
-                    builder.field(Fields.INGEST_TOOK, response.getIngestTookInMillis());
-                }
-                builder.field(Fields.ERRORS, response.hasFailures());
-                builder.startArray(Fields.ITEMS);
-                for (BulkItemResponse itemResponse : response) {
-                    itemResponse.toXContent(builder, request);
-                }
-                builder.endArray();
-
-                builder.endObject();
-                return new BytesRestResponse(OK, builder);
-            }
-        });
+        return channel -> client.bulk(bulkRequest, new RestStatusToXContentListener<>(channel));
     }
 
     @Override
     public boolean supportsContentStream() {
         return true;
     }
-
-    static final class Fields {
-        static final String ITEMS = "items";
-        static final String ERRORS = "errors";
-        static final String TOOK = "took";
-        static final String INGEST_TOOK = "ingest_took";
-    }
 }
diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index bf8308202b7f9..89e2f23861c98 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.rest.action.search;
 
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
@@ -94,6 +93,9 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r
             searchRequest.source().parseXContent(context);
         }
 
+        final int batchedReduceSize = request.paramAsInt("batched_reduce_size", searchRequest.getBatchedReduceSize());
+        searchRequest.setBatchedReduceSize(batchedReduceSize);
+
         // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types
         // from the REST layer. these modes are an internal optimization and should
         // not be specified explicitly by the user.
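
The new batched_reduce_size REST parameter simply feeds SearchRequest#setBatchedReduceSize. A minimal sketch of the equivalent client-side call, assuming a plain SearchRequest; the index name and the value 64 are illustrative:

    // illustrative only: reduce shard results on the coordinating node in batches of 64
    SearchRequest searchRequest = new SearchRequest("my-index"); // "my-index" is a placeholder
    searchRequest.setBatchedReduceSize(64);                      // same setter the REST layer calls above
    // over HTTP this corresponds to the query parameter batched_reduce_size=64
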
diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java
index c6a7319372b3a..9f8a774398cf1 100644
--- a/core/src/main/java/org/elasticsearch/script/Script.java
+++ b/core/src/main/java/org/elasticsearch/script/Script.java
@@ -169,9 +169,10 @@ private void setInline(XContentParser parser) {
                 type = ScriptType.INLINE;
 
                 if (parser.currentToken() == Token.START_OBJECT) {
-                    XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
-                    idOrCode = builder.copyCurrentStructure(parser).bytes().utf8ToString();
-                    options.put(CONTENT_TYPE_OPTION, parser.contentType().mediaType());
+                    // this is really for search templates, which need to be converted to JSON format
+                    XContentBuilder builder = XContentFactory.jsonBuilder();
+                    idOrCode = builder.copyCurrentStructure(parser).string();
+                    options.put(CONTENT_TYPE_OPTION, XContentType.JSON.mediaType());
                 } else {
                     idOrCode = parser.text();
                 }
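
In other words, an object-valued inline script (a search template body) is now always re-serialized to JSON, regardless of the content type of the request that carried it. A minimal sketch of that behavior, reusing only calls that appear in the hunk above and assuming parser is positioned on the template's START_OBJECT and options is the builder's option map:

    // sketch: normalize the object under the parser to JSON, whatever the request body format was
    XContentBuilder builder = XContentFactory.jsonBuilder();
    String idOrCode = builder.copyCurrentStructure(parser).string(); // JSON text of the template
    options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); // recorded as the JSON media type
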
diff --git a/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
index e6c5b09362c73..11b7821390847 100644
--- a/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
+++ b/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
@@ -37,7 +37,6 @@
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -107,9 +106,10 @@ private void setLang(String lang) {
         private void setCode(XContentParser parser) {
             try {
                 if (parser.currentToken() == Token.START_OBJECT) {
-                    XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
-                    code = builder.copyCurrentStructure(parser).bytes().utf8ToString();
-                    options.put(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType());
+                    // this is really for search templates, which need to be converted to JSON format
+                    XContentBuilder builder = XContentFactory.jsonBuilder();
+                    code = builder.copyCurrentStructure(parser).string();
+                    options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType());
                 } else {
                     code = parser.text();
                 }
@@ -263,11 +263,11 @@ public static StoredScriptSource parse(String lang, BytesReference content, XCon
                     if (lang == null) {
                         return PARSER.apply(parser, null).build();
                     } else {
-                        try (XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType())) {
+                        // this is really for search templates, which need to be converted to JSON format
+                        try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                             builder.copyCurrentStructure(parser);
-
                             return new StoredScriptSource(lang, builder.string(),
-                                Collections.singletonMap(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType()));
+                                Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
                         }
                     }
 
@@ -284,11 +284,11 @@ public static StoredScriptSource parse(String lang, BytesReference content, XCon
 
                     if (token == Token.VALUE_STRING) {
                         return new StoredScriptSource(lang, parser.text(),
-                            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType()));
+                            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
                     }
                 }
 
-                try (XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType())) {
+                try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                     if (token != Token.START_OBJECT) {
                         builder.startObject();
                         builder.copyCurrentStructure(parser);
@@ -298,7 +298,7 @@ public static StoredScriptSource parse(String lang, BytesReference content, XCon
                     }
 
                     return new StoredScriptSource(lang, builder.string(),
-                        Collections.singletonMap(Script.CONTENT_TYPE_OPTION, parser.contentType().mediaType()));
+                        Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
                 }
             }
         } catch (IOException ioe) {
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index 9044db37a33e8..3d093e5ae7282 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -561,7 +561,7 @@ private void contextProcessing(SearchContext context) {
     }
 
     private void contextProcessedSuccessfully(SearchContext context) {
-        context.accessed(threadPool.estimatedTimeInMillis());
+        context.accessed(threadPool.relativeTimeInMillis());
     }
 
     private void cleanContext(SearchContext context) {
@@ -794,7 +794,7 @@ public int getActiveContexts() {
     class Reaper implements Runnable {
         @Override
         public void run() {
-            final long time = threadPool.estimatedTimeInMillis();
+            final long time = threadPool.relativeTimeInMillis();
             for (SearchContext context : activeContexts.values()) {
                 // Use the same value for both checks since lastAccessTime can
                 // be modified by another thread between checks!
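
The rename from estimatedTimeInMillis to relativeTimeInMillis makes the contract explicit: the cached clock is only good for computing deltas. A minimal sketch, assuming a ThreadPool instance named threadPool and a placeholder doWork() method:

    // measure elapsed time with the cached, periodically refreshed clock
    long startMillis = threadPool.relativeTimeInMillis();
    doWork();                                                          // placeholder for the timed work
    long tookMillis = threadPool.relativeTimeInMillis() - startMillis; // only the difference is meaningful
    // for an epoch-based wall-clock timestamp, use threadPool.absoluteTimeInMillis() instead
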
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
index 6af896426a798..563a958109be2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
@@ -47,10 +47,21 @@ public static class ReduceContext {
 
         private final BigArrays bigArrays;
         private final ScriptService scriptService;
+        private final boolean isFinalReduce;
 
-        public ReduceContext(BigArrays bigArrays, ScriptService scriptService) {
+        public ReduceContext(BigArrays bigArrays, ScriptService scriptService, boolean isFinalReduce) {
             this.bigArrays = bigArrays;
             this.scriptService = scriptService;
+            this.isFinalReduce = isFinalReduce;
+        }
+
+        /**
+         * Returns true iff the current reduce phase is the final reduce phase. This indicates if operations like
+         * pipeline aggregations should be applied or if specific features like minDocCount should be taken into account.
+         * Operations that potentially lose information can only be applied during the final reduce phase.
+         */
+        public boolean isFinalReduce() {
+            return isFinalReduce;
         }
 
         public BigArrays bigArrays() {
@@ -111,8 +122,10 @@ public String getName() {
      */
     public final InternalAggregation reduce(List aggregations, ReduceContext reduceContext) {
         InternalAggregation aggResult = doReduce(aggregations, reduceContext);
-        for (PipelineAggregator pipelineAggregator : pipelineAggregators) {
-            aggResult = pipelineAggregator.reduce(aggResult, reduceContext);
+        if (reduceContext.isFinalReduce()) {
+            for (PipelineAggregator pipelineAggregator : pipelineAggregators) {
+                aggResult = pipelineAggregator.reduce(aggResult, reduceContext);
+            }
         }
         return aggResult;
     }
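
A minimal sketch of how the new flag is meant to be used, assuming bigArrays and scriptService instances are available; partial (incremental) reduces skip pipeline aggregators and other lossy steps, while the final reduce applies them:

    // partial reduce: pipeline aggregators and minDocCount/size trimming are skipped
    InternalAggregation.ReduceContext partial =
        new InternalAggregation.ReduceContext(bigArrays, scriptService, false);
    // final reduce: pipeline aggregators run and the usual constraints are applied
    InternalAggregation.ReduceContext last =
        new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
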
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
index 085f18c0e1e78..ef268f8a5049c 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
@@ -192,7 +192,7 @@ public InternalGeoHashGrid doReduce(List aggregations, Redu
             }
         }
 
-        final int size = (int) Math.min(requiredSize, buckets.size());
+        final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()));
         BucketPriorityQueue ordered = new BucketPriorityQueue(size);
         for (LongObjectPagedHashMap.Cursor> cursor : buckets) {
             List sameCellBuckets = cursor.value;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
index f24fc5c127ec6..a8976aaa1ac77 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
@@ -285,7 +285,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
                 if (top.current.key != key) {
                     // the key changes, reduce what we already buffered and reset the buffer for current buckets
                     final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
-                    if (reduced.getDocCount() >= minDocCount) {
+                    if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                         reducedBuckets.add(reduced);
                     }
                     currentBuckets.clear();
@@ -306,7 +306,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
 
             if (currentBuckets.isEmpty() == false) {
                 final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
-                if (reduced.getDocCount() >= minDocCount) {
+                if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                     reducedBuckets.add(reduced);
                 }
             }
@@ -382,7 +382,7 @@ public InternalAggregation doReduce(List aggregations, Redu
             addEmptyBuckets(reducedBuckets, reduceContext);
         }
 
-        if (order == InternalOrder.KEY_ASC) {
+        if (order == InternalOrder.KEY_ASC || reduceContext.isFinalReduce() == false) {
             // nothing to do, data are already sorted since shards return
             // sorted buckets and the merge-sort performed by reduceBuckets
             // maintains order
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
index eb90dfae732ad..e6e23d3a615a1 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -308,7 +308,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
                 if (top.current.key != key) {
                     // the key changes, reduce what we already buffered and reset the buffer for current buckets
                     final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
-                    if (reduced.getDocCount() >= minDocCount) {
+                    if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                         reducedBuckets.add(reduced);
                     }
                     currentBuckets.clear();
@@ -329,7 +329,7 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) {
 
             if (currentBuckets.isEmpty() == false) {
                 final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext);
-                if (reduced.getDocCount() >= minDocCount) {
+                if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) {
                     reducedBuckets.add(reduced);
                 }
             }
@@ -400,7 +400,7 @@ public InternalAggregation doReduce(List aggregations, Redu
             addEmptyBuckets(reducedBuckets, reduceContext);
         }
 
-        if (order == InternalOrder.KEY_ASC) {
+        if (order == InternalOrder.KEY_ASC || reduceContext.isFinalReduce() == false) {
             // nothing to do, data are already sorted since shards return
             // sorted buckets and the merge-sort performed by reduceBuckets
             // maintains order
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
index cdd1f8d19a7de..6fcee8e937e53 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
@@ -196,15 +196,14 @@ public InternalAggregation doReduce(List aggregations, Redu
                         bucket.aggregations));
             }
         }
-
         SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext);
-        final int size = Math.min(requiredSize, buckets.size());
+        final int size = reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size());
         BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size);
         for (Map.Entry> entry : buckets.entrySet()) {
             List sameTermBuckets = entry.getValue();
             final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext);
             b.updateScore(heuristic);
-            if ((b.score > 0) && (b.subsetDf >= minDocCount)) {
+            if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) {
                 ordered.insertWithOverflow(b);
             }
         }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index 961e0a9228066..938b20d9fc892 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -33,6 +33,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -248,8 +249,8 @@ public InternalAggregation doReduce(List aggregations, Redu
             }
         }
 
-        final int size = Math.min(requiredSize, buckets.size());
-        BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(null));
+        final int size = reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size());
+        final BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(null));
         for (List sameTermBuckets : buckets.values()) {
             final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext);
             if (b.docCountError != -1) {
@@ -259,7 +260,7 @@ public InternalAggregation doReduce(List aggregations, Redu
                     b.docCountError = sumDocCountError - b.docCountError;
                 }
             }
-            if (b.docCount >= minDocCount) {
+            if (b.docCount >= minDocCount || reduceContext.isFinalReduce() == false) {
                 B removed = ordered.insertWithOverflow(b);
                 if (removed != null) {
                     otherDocCount += removed.getDocCount();
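
The same pattern recurs across the bucket aggregations above: an intermediate reduce must not trim or filter buckets that a later reduce may still need. An illustrative sketch with made-up numbers:

    // a terms aggregation with requiredSize = 10 and 25 distinct buckets seen so far
    int requiredSize = 10;
    int bucketsSeen = 25;
    boolean finalReduce = false; // an intermediate, batched reduce
    int size = finalReduce == false
        ? bucketsSeen                          // keep all 25 buckets (and ignore minDocCount)
        : Math.min(requiredSize, bucketsSeen); // only the final reduce trims to the requested 10
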
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java
deleted file mode 100644
index ded69d9f75bfb..0000000000000
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.aggregations.metrics;
-
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-public abstract class InternalMetricsAggregation extends InternalAggregation {
-    protected InternalMetricsAggregation(String name, List pipelineAggregators, Map metaData) {
-        super(name, pipelineAggregators, metaData);
-    }
-
-    /**
-     * Read from a stream.
-     */
-    protected InternalMetricsAggregation(StreamInput in) throws IOException {
-        super(in);
-    }
-
-}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
index 010e24346bfad..dba16397fc050 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
@@ -20,6 +20,7 @@
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -27,7 +28,7 @@
 import java.util.Map;
 import java.util.Objects;
 
-public abstract class InternalNumericMetricsAggregation extends InternalMetricsAggregation {
+public abstract class InternalNumericMetricsAggregation extends InternalAggregation {
 
     private static final DocValueFormat DEFAULT_FORMAT = DocValueFormat.RAW;
 
@@ -118,7 +119,7 @@ public boolean equals(Object obj) {
             return false;
         }
         InternalNumericMetricsAggregation other = (InternalNumericMetricsAggregation) obj;
-        return super.equals(obj) && 
+        return super.equals(obj) &&
                 Objects.equals(format, other.format);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
index bdb9acd91abb2..2a3d03e43e624 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -32,7 +31,7 @@
 import java.util.Map;
 import java.util.Objects;
 
-public class InternalGeoBounds extends InternalMetricsAggregation implements GeoBounds {
+public class InternalGeoBounds extends InternalAggregation implements GeoBounds {
     final double top;
     final double bottom;
     final double posLeft;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
index 06d9d369029af..a5a8058ed2835 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
@@ -25,7 +25,6 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -35,7 +34,7 @@
 /**
  * Serialization and merge logic for {@link GeoCentroidAggregator}.
  */
-public class InternalGeoCentroid extends InternalMetricsAggregation implements GeoCentroid {
+public class InternalGeoCentroid extends InternalAggregation implements GeoCentroid {
     protected final GeoPoint centroid;
     protected final long count;
 
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
index 6cb3b626f9185..bb8e1ac48d399 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
@@ -27,7 +27,6 @@
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -37,11 +36,16 @@
 import java.util.List;
 import java.util.Map;
 
-public class InternalScriptedMetric extends InternalMetricsAggregation implements ScriptedMetric {
+public class InternalScriptedMetric extends InternalAggregation implements ScriptedMetric {
     private final Script reduceScript;
-    private final Object aggregation;
+    private final List aggregation;
 
     public InternalScriptedMetric(String name, Object aggregation, Script reduceScript, List pipelineAggregators,
+                                  Map metaData) {
+        this(name, Collections.singletonList(aggregation), reduceScript, pipelineAggregators, metaData);
+    }
+
+    private InternalScriptedMetric(String name, List aggregation, Script reduceScript, List pipelineAggregators,
             Map metaData) {
         super(name, pipelineAggregators, metaData);
         this.aggregation = aggregation;
@@ -54,13 +58,13 @@ public InternalScriptedMetric(String name, Object aggregation, Script reduceScri
     public InternalScriptedMetric(StreamInput in) throws IOException {
         super(in);
         reduceScript = in.readOptionalWriteable(Script::new);
-        aggregation = in.readGenericValue();
+        aggregation = Collections.singletonList(in.readGenericValue());
     }
 
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeOptionalWriteable(reduceScript);
-        out.writeGenericValue(aggregation);
+        out.writeGenericValue(aggregation());
     }
 
     @Override
@@ -70,7 +74,10 @@ public String getWriteableName() {
 
     @Override
     public Object aggregation() {
-        return aggregation;
+        if (aggregation.size() != 1) {
+            throw new IllegalStateException("aggregation was not reduced");
+        }
+        return aggregation.get(0);
     }
 
     @Override
@@ -78,11 +85,11 @@ public InternalAggregation doReduce(List aggregations, Redu
         List aggregationObjects = new ArrayList<>();
         for (InternalAggregation aggregation : aggregations) {
             InternalScriptedMetric mapReduceAggregation = (InternalScriptedMetric) aggregation;
-            aggregationObjects.add(mapReduceAggregation.aggregation());
+            aggregationObjects.addAll(mapReduceAggregation.aggregation);
         }
         InternalScriptedMetric firstAggregation = ((InternalScriptedMetric) aggregations.get(0));
-        Object aggregation;
-        if (firstAggregation.reduceScript != null) {
+        List aggregation;
+        if (firstAggregation.reduceScript != null && reduceContext.isFinalReduce()) {
             Map vars = new HashMap<>();
             vars.put("_aggs", aggregationObjects);
             if (firstAggregation.reduceScript.getParams() != null) {
@@ -91,13 +98,16 @@ public InternalAggregation doReduce(List aggregations, Redu
             CompiledScript compiledScript = reduceContext.scriptService().compile(
                 firstAggregation.reduceScript, ScriptContext.Standard.AGGS);
             ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars);
-            aggregation = script.run();
+            aggregation = Collections.singletonList(script.run());
+        } else if (reduceContext.isFinalReduce()) {
+            aggregation = Collections.singletonList(aggregationObjects);
         } else {
+            // if this is not the final reduce we have to maintain all the aggs from all the incoming ones
+            // until we hit the final reduce phase.
             aggregation = aggregationObjects;
         }
         return new InternalScriptedMetric(firstAggregation.getName(), aggregation, firstAggregation.reduceScript, pipelineAggregators(),
                 getMetaData());
-
     }
 
     @Override
@@ -105,7 +115,7 @@ public Object getProperty(List path) {
         if (path.isEmpty()) {
             return this;
         } else if (path.size() == 1 && "value".equals(path.get(0))) {
-            return aggregation;
+            return aggregation();
         } else {
             throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
         }
@@ -113,7 +123,7 @@ public Object getProperty(List path) {
 
     @Override
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
-        return builder.field("value", aggregation);
+        return builder.field("value", aggregation());
     }
 
 }
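
For scripted_metric this means the reduce script can only run once, on the final reduce; every intermediate reduce just concatenates the per-shard states. A sketch of the three branches with hypothetical helper names (collectStates and runReduceScript are illustrative, not real methods):

    List<Object> aggregationObjects = collectStates();   // states gathered from the aggs being merged
    List<Object> reduced;
    if (reduceScript != null && isFinalReduce) {
        reduced = Collections.singletonList(runReduceScript(aggregationObjects)); // _aggs -> one value
    } else if (isFinalReduce) {
        reduced = Collections.singletonList(aggregationObjects); // no reduce script: expose the raw list
    } else {
        reduced = aggregationObjects; // partial reduce: carry every state forward untouched
    }
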
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
index e060826c24c0b..08c9292d54e62 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
@@ -29,6 +29,7 @@
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 public class InternalStats extends InternalNumericMetricsAggregation.MultiValue implements Stats {
     enum Metrics {
@@ -198,4 +199,18 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
     protected XContentBuilder otherStatsToXCotent(XContentBuilder builder, Params params) throws IOException {
         return builder;
     }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(count, min, max, sum);
+    }
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalStats other = (InternalStats) obj;
+        return count == other.count &&
+            Double.compare(min, other.min) == 0 &&
+            Double.compare(max, other.max) == 0 &&
+            Double.compare(sum, other.sum) == 0;
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
index 499111d56685d..d6faf5cbb78a0 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
@@ -189,8 +189,8 @@ public InternalAggregation buildAggregation(long bucket) {
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, sigma, format, pipelineAggregators(),
-                metaData());
+        return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d,
+            sigma, format, pipelineAggregators(), metaData());
     }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
index d848001171c6b..370399bfbb8db 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
@@ -29,6 +29,7 @@
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 public class InternalExtendedStats extends InternalStats implements ExtendedStats {
     enum Metrics {
@@ -90,6 +91,10 @@ public double value(String name) {
         return super.value(name);
     }
 
+    public double getSigma() {
+        return this.sigma;
+    }
+
     @Override
     public double getSumOfSquares() {
         return sumOfSqrs;
@@ -186,4 +191,17 @@ protected XContentBuilder otherStatsToXCotent(XContentBuilder builder, Params pa
         }
         return builder;
     }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(super.doHashCode(), sumOfSqrs, sigma);
+    }
+
+    @Override
+    protected boolean doEquals(Object obj) {
+        InternalExtendedStats other = (InternalExtendedStats) obj;
+        return super.doEquals(obj) &&
+            Double.compare(sumOfSqrs, other.sumOfSqrs) == 0 &&
+            Double.compare(sigma, other.sigma) == 0;
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
index baa8c45e140ca..1b32e6e9deeca 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
@@ -27,11 +27,10 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
-import org.elasticsearch.search.SearchHit;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -41,7 +40,7 @@
 /**
  * Results of the {@link TopHitsAggregator}.
  */
-public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
+public class InternalTopHits extends InternalAggregation implements TopHits {
     private int from;
     private int size;
     private TopDocs topDocs;
@@ -96,7 +95,18 @@ int getSize() {
 
     @Override
     public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) {
-        SearchHits[] shardHits = new SearchHits[aggregations.size()];
+        final SearchHits[] shardHits = new SearchHits[aggregations.size()];
+        final int from;
+        final int size;
+        if (reduceContext.isFinalReduce()) {
+            from = this.from;
+            size = this.size;
+        } else {
+            // if we are not in the final reduce we need to preserve every element that a later reduce might still use;
+            // for pagination this means keeping all hits up to from + size until the final phase.
+            from = 0;
+            size = this.from + this.size;
+        }
 
         final TopDocs reducedTopDocs;
         final TopDocs[] shardDocs;
@@ -106,7 +116,7 @@ public InternalAggregation doReduce(List aggregations, Redu
             shardDocs = new TopFieldDocs[aggregations.size()];
             for (int i = 0; i < shardDocs.length; i++) {
                 InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i);
-                shardDocs[i] = (TopFieldDocs) topHitsAgg.topDocs;
+                shardDocs[i] = topHitsAgg.topDocs;
                 shardHits[i] = topHitsAgg.searchHits;
             }
             reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
@@ -130,7 +140,7 @@ public InternalAggregation doReduce(List aggregations, Redu
             } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc);
             hits[i] = shardHits[scoreDoc.shardIndex].getAt(position);
         }
-        return new InternalTopHits(name, from, size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits,
+        return new InternalTopHits(name, this.from, this.size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits,
                 reducedTopDocs.getMaxScore()),
                 pipelineAggregators(), getMetaData());
     }
@@ -162,7 +172,7 @@ protected boolean doEquals(Object obj) {
             ScoreDoc thisDoc = topDocs.scoreDocs[d];
             ScoreDoc otherDoc = other.topDocs.scoreDocs[d];
             if (thisDoc.doc != otherDoc.doc) return false;
-            if (thisDoc.score != otherDoc.score) return false;
+            if (Double.compare(thisDoc.score, otherDoc.score) != 0) return false;
             if (thisDoc.shardIndex != otherDoc.shardIndex) return false;
             if (thisDoc instanceof FieldDoc) {
                 if (false == (otherDoc instanceof FieldDoc)) return false;
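
A worked example of the from/size handling above: for a top_hits with from = 5 and size = 10, a partial reduce keeps the first 15 merged hits so that the final reduce can still apply the offset. Illustrative values only:

    int requestedFrom = 5, requestedSize = 10;
    // partial reduce: no offset, keep from + size hits so nothing needed later is dropped
    int partialFrom = 0, partialSize = requestedFrom + requestedSize; // 0 and 15
    // final reduce: apply the real pagination again
    int finalFrom = requestedFrom, finalSize = requestedSize;         // 5 and 10
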
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
index d396d14e9837b..391f6efe18bfe 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -38,7 +38,7 @@
 public class InternalSearchResponse implements Streamable, ToXContent {
 
     public static InternalSearchResponse empty() {
-        return new InternalSearchResponse(SearchHits.empty(), null, null, null, false, null);
+        return new InternalSearchResponse(SearchHits.empty(), null, null, null, false, null, 1);
     }
 
     private SearchHits hits;
@@ -53,17 +53,21 @@ public static InternalSearchResponse empty() {
 
     private Boolean terminatedEarly = null;
 
+    private int numReducePhases = 1;
+
     private InternalSearchResponse() {
     }
 
     public InternalSearchResponse(SearchHits hits, InternalAggregations aggregations, Suggest suggest,
-                                  SearchProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) {
+                                  SearchProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly,
+                                  int numReducePhases) {
         this.hits = hits;
         this.aggregations = aggregations;
         this.suggest = suggest;
         this.profileResults = profileResults;
         this.timedOut = timedOut;
         this.terminatedEarly = terminatedEarly;
+        this.numReducePhases = numReducePhases;
     }
 
     public boolean timedOut() {
@@ -86,6 +90,13 @@ public Suggest suggest() {
         return suggest;
     }
 
+    /**
+     * Returns the number of reduce phases applied to obtain this search response
+     */
+    public int getNumReducePhases() {
+        return numReducePhases;
+    }
+
     /**
      * Returns the profile results for this search response (including all shards).
      * An empty map is returned if profiling was not enabled
@@ -132,6 +143,7 @@ public void readFrom(StreamInput in) throws IOException {
         timedOut = in.readBoolean();
         terminatedEarly = in.readOptionalBoolean();
         profileResults = in.readOptionalWriteable(SearchProfileShardResults::new);
+        numReducePhases = in.readVInt();
     }
 
     @Override
@@ -152,5 +164,6 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(timedOut);
         out.writeOptionalBoolean(terminatedEarly);
         out.writeOptionalWriteable(profileResults);
+        out.writeVInt(numReducePhases);
     }
 }
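
Because numReducePhases is read and written at the tail of the existing readFrom/writeTo methods, the field order on both sides of the wire has to stay symmetric. A tiny sketch of that symmetry using plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput (the field names are only illustrative):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative only: a field appended on the write side must be read in the same position.
public class StreamOrderSketch {

    static byte[] write(boolean timedOut, int numReducePhases) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeBoolean(timedOut);     // pre-existing field
            out.writeInt(numReducePhases);  // new field appended last, mirroring writeTo()
        }
        return bytes.toByteArray();
    }

    static int readNumReducePhases(byte[] payload) throws IOException {
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload))) {
            in.readBoolean();    // pre-existing field, consumed first
            return in.readInt(); // new field, read last, exactly where it was written
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] payload = write(false, 3);
        System.out.println(readNumReducePhases(payload)); // 3
    }
}
```
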
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index 6817ae991a375..b68037b8dc6f2 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -142,7 +142,7 @@ public static ThreadPoolType fromType(String type) {
 
     private final ScheduledThreadPoolExecutor scheduler;
 
-    private final EstimatedTimeThread estimatedTimeThread;
+    private final CachedTimeThread cachedTimeThread;
 
     static final ExecutorService DIRECT_EXECUTOR = EsExecutors.newDirectExecutorService();
 
@@ -213,16 +213,33 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui
         this.scheduler.setRemoveOnCancelPolicy(true);
 
         TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings);
-        this.estimatedTimeThread = new EstimatedTimeThread(EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis());
-        this.estimatedTimeThread.start();
+        this.cachedTimeThread = new CachedTimeThread(EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis());
+        this.cachedTimeThread.start();
     }
 
-    public long estimatedTimeInMillis() {
-        return estimatedTimeThread.estimatedTimeInMillis();
+    /**
+     * Returns a value in milliseconds that may be used for relative time calculations.
+     *
+     * This method should only be used for calculating time deltas. For an epoch-based
+     * timestamp, see {@link #absoluteTimeInMillis()}.
+     */
+    public long relativeTimeInMillis() {
+        return cachedTimeThread.relativeTimeInMillis();
+    }
+
+    /**
+     * Returns the number of milliseconds since the UNIX epoch.
+     *
+     * This method should only be used for exact date/time formatting. For calculating
+     * time deltas, use {@link #relativeTimeInMillis()} instead; the wall clock can be
+     * adjusted backwards, so deltas computed from this method may be negative.
+     */
+    public long absoluteTimeInMillis() {
+        return cachedTimeThread.absoluteTimeInMillis();
     }
 
     public Counter estimatedTimeInMillisCounter() {
-        return estimatedTimeThread.counter;
+        return cachedTimeThread.counter;
     }
 
     public ThreadPoolInfo info() {
@@ -342,8 +359,8 @@ public ScheduledFuture schedule(TimeValue delay, String executor, Runnable co
     }
 
     public void shutdown() {
-        estimatedTimeThread.running = false;
-        estimatedTimeThread.interrupt();
+        cachedTimeThread.running = false;
+        cachedTimeThread.interrupt();
         scheduler.shutdown();
         for (ExecutorHolder executor : executors.values()) {
             if (executor.executor() instanceof ThreadPoolExecutor) {
@@ -353,8 +370,8 @@ public void shutdown() {
     }
 
     public void shutdownNow() {
-        estimatedTimeThread.running = false;
-        estimatedTimeThread.interrupt();
+        cachedTimeThread.running = false;
+        cachedTimeThread.interrupt();
         scheduler.shutdownNow();
         for (ExecutorHolder executor : executors.values()) {
             if (executor.executor() instanceof ThreadPoolExecutor) {
@@ -371,7 +388,7 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE
             }
         }
 
-        estimatedTimeThread.join(unit.toMillis(timeout));
+        cachedTimeThread.join(unit.toMillis(timeout));
         return result;
     }
 
@@ -471,29 +488,50 @@ public String toString() {
         }
     }
 
-    static class EstimatedTimeThread extends Thread {
+    /**
+     * A thread to cache millisecond time values from
+     * {@link System#nanoTime()} and {@link System#currentTimeMillis()}.
+     *
+     * The values are updated at a specified interval.
+     */
+    static class CachedTimeThread extends Thread {
 
         final long interval;
         final TimeCounter counter;
         volatile boolean running = true;
-        volatile long estimatedTimeInMillis;
+        volatile long relativeMillis;
+        volatile long absoluteMillis;
 
-        EstimatedTimeThread(String name, long interval) {
+        CachedTimeThread(String name, long interval) {
             super(name);
             this.interval = interval;
-            this.estimatedTimeInMillis = TimeValue.nsecToMSec(System.nanoTime());
+            this.relativeMillis = TimeValue.nsecToMSec(System.nanoTime());
+            this.absoluteMillis = System.currentTimeMillis();
             this.counter = new TimeCounter();
             setDaemon(true);
         }
 
-        public long estimatedTimeInMillis() {
-            return this.estimatedTimeInMillis;
+        /**
+         * Return the current time used for relative calculations. This is
+         * {@link System#nanoTime()} truncated to milliseconds.
+         */
+        long relativeTimeInMillis() {
+            return relativeMillis;
+        }
+
+        /**
+         * Return the current epoch time, used for absolute timestamps. This is
+         * a cached version of {@link System#currentTimeMillis()}.
+         */
+        long absoluteTimeInMillis() {
+            return absoluteMillis;
         }
 
         @Override
         public void run() {
             while (running) {
-                estimatedTimeInMillis = TimeValue.nsecToMSec(System.nanoTime());
+                relativeMillis = TimeValue.nsecToMSec(System.nanoTime());
+                absoluteMillis = System.currentTimeMillis();
                 try {
                     Thread.sleep(interval);
                 } catch (InterruptedException e) {
@@ -512,7 +550,7 @@ public long addAndGet(long delta) {
 
             @Override
             public long get() {
-                return estimatedTimeInMillis;
+                return relativeMillis;
             }
         }
     }
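
The renamed methods separate two clocks: a monotonic one for measuring durations and a wall clock for epoch timestamps, both of which CachedTimeThread caches. A minimal sketch of when each is appropriate, using the plain JDK calls the thread wraps (not Elasticsearch code):

```java
import java.util.concurrent.TimeUnit;

// Illustrative only: a monotonic clock for deltas, the wall clock for timestamps.
public class ClockSketch {

    public static void main(String[] args) throws InterruptedException {
        // Relative (monotonic) clock: safe for measuring elapsed time because it never
        // goes backwards, but its absolute value is not anchored to the epoch.
        long startRelativeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
        Thread.sleep(50);
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) - startRelativeMillis;
        System.out.println("elapsed ~" + elapsedMillis + "ms");

        // Absolute (wall) clock: meaningful as an epoch timestamp for date formatting,
        // but it can jump when the system clock is adjusted, so deltas may be negative.
        long epochMillis = System.currentTimeMillis();
        System.out.println("epoch timestamp: " + epochMillis);
    }
}
```
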
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java
new file mode 100644
index 0000000000000..b23fbe4438f4e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.delete.DeleteResponseTests;
+import org.elasticsearch.action.index.IndexResponseTests;
+import org.elasticsearch.action.update.UpdateResponseTests;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+import static org.elasticsearch.ElasticsearchExceptionTests.randomExceptions;
+import static org.elasticsearch.action.bulk.BulkItemResponseTests.assertBulkItemResponse;
+import static org.elasticsearch.action.bulk.BulkResponse.NO_INGEST_TOOK;
+import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
+
+public class BulkResponseTests extends ESTestCase {
+
+    public void testToAndFromXContent() throws IOException {
+        XContentType xContentType = randomFrom(XContentType.values());
+        boolean humanReadable = randomBoolean();
+
+        long took = randomFrom(randomNonNegativeLong(), -1L);
+        long ingestTook = randomFrom(randomNonNegativeLong(), NO_INGEST_TOOK);
+        int nbBulkItems = randomIntBetween(1, 10);
+
+        BulkItemResponse[] bulkItems = new BulkItemResponse[nbBulkItems];
+        BulkItemResponse[] expectedBulkItems = new BulkItemResponse[nbBulkItems];
+
+        for (int i = 0; i < nbBulkItems; i++) {
+            DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
+
+            if (frequently()) {
+                Tuple randomDocWriteResponses = null;
+                if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
+                    randomDocWriteResponses = IndexResponseTests.randomIndexResponse();
+                } else if (opType == DocWriteRequest.OpType.DELETE) {
+                    randomDocWriteResponses = DeleteResponseTests.randomDeleteResponse();
+                } else if (opType == DocWriteRequest.OpType.UPDATE) {
+                    randomDocWriteResponses = UpdateResponseTests.randomUpdateResponse(xContentType);
+                } else {
+                    fail("Test does not support opType [" + opType + "]");
+                }
+
+                bulkItems[i] = new BulkItemResponse(i, opType, randomDocWriteResponses.v1());
+                expectedBulkItems[i] = new BulkItemResponse(i, opType, randomDocWriteResponses.v2());
+            } else {
+                String index = randomAsciiOfLength(5);
+                String type = randomAsciiOfLength(5);
+                String id = randomAsciiOfLength(5);
+
+                Tuple failures = randomExceptions();
+                bulkItems[i] = new BulkItemResponse(i, opType, new BulkItemResponse.Failure(index, type, id, (Exception) failures.v1()));
+                expectedBulkItems[i] = new BulkItemResponse(i, opType, new BulkItemResponse.Failure(index, type, id, failures.v2()));
+            }
+        }
+
+        BulkResponse bulkResponse = new BulkResponse(bulkItems, took, ingestTook);
+        BytesReference originalBytes = toXContent(bulkResponse, xContentType, humanReadable);
+
+        if (randomBoolean()) {
+            try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
+                originalBytes = shuffleXContent(parser, randomBoolean()).bytes();
+            }
+        }
+
+        BulkResponse parsedBulkResponse;
+        try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
+            parsedBulkResponse = BulkResponse.fromXContent(parser);
+            assertNull(parser.nextToken());
+        }
+
+        assertEquals(took, parsedBulkResponse.getTookInMillis());
+        assertEquals(ingestTook, parsedBulkResponse.getIngestTookInMillis());
+        assertEquals(expectedBulkItems.length, parsedBulkResponse.getItems().length);
+
+        for (int i = 0; i < expectedBulkItems.length; i++) {
+            assertBulkItemResponse(expectedBulkItems[i], parsedBulkResponse.getItems()[i]);
+        }
+
+        BytesReference finalBytes = toXContent(parsedBulkResponse, xContentType, humanReadable);
+        BytesReference expectedFinalBytes = toXContent(parsedBulkResponse, xContentType, humanReadable);
+        assertToXContentEquivalent(expectedFinalBytes, finalBytes, xContentType);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java
index 92ce5040e376b..1fafd847a3f20 100644
--- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java
@@ -446,7 +446,7 @@ public ScheduledFuture schedule(TimeValue delay, String name, Runnable comman
         // Now we can simulate a response and check the delay that we used for the task
         SearchHit hit = new SearchHit(0, "id", new Text("type"), emptyMap());
         SearchHits hits = new SearchHits(new SearchHit[] { hit }, 0, 0);
-        InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false);
+        InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1);
         SearchResponse searchResponse = new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), null);
 
         if (randomBoolean()) {
diff --git a/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java
index d34bf180e806c..6995ad93f25fd 100644
--- a/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java
@@ -46,7 +46,7 @@ public void testCollect() throws InterruptedException {
                 runnable.run();
             }
         };
-        CountedCollector collector = new CountedCollector<>(results, numResultsExpected,
+        CountedCollector collector = new CountedCollector<>(results::set, numResultsExpected,
             latch::countDown, context);
         for (int i = 0; i < numResultsExpected; i++) {
             int shardID = i;
diff --git a/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java
index f094db086f84e..ba01559e0f063 100644
--- a/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java
@@ -95,7 +95,7 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest
             (response) -> new SearchPhase("test") {
             @Override
             public void run() throws IOException {
-                responseRef.set(response);
+                responseRef.set(response.results);
             }
         }, mockSearchPhaseContext);
         assertEquals("dfs_query", phase.getName());
@@ -147,7 +147,7 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest
             (response) -> new SearchPhase("test") {
                 @Override
                 public void run() throws IOException {
-                    responseRef.set(response);
+                    responseRef.set(response.results);
                 }
             }, mockSearchPhaseContext);
         assertEquals("dfs_query", phase.getName());
@@ -202,7 +202,7 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest
             (response) -> new SearchPhase("test") {
                 @Override
                 public void run() throws IOException {
-                    responseRef.set(response);
+                    responseRef.set(response.results);
                 }
             }, mockSearchPhaseContext);
         assertEquals("dfs_query", phase.getName());
diff --git a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java
index 0ac452e62bdaa..20e295561bbcd 100644
--- a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java
@@ -79,7 +79,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL
 
 
                     InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits,
-                        null, null, null, false, null);
+                        null, null, null, false, null, 1);
                     SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null);
                     listener.onResponse(new MultiSearchResponse(new MultiSearchResponse.Item[]{
                         new MultiSearchResponse.Item(response, null)
@@ -91,7 +91,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL
             SearchHits hits = new SearchHits(new SearchHit[]{new SearchHit(1, "ID", new Text("type"),
                 Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(collapseValue))))},
                 1, 1.0F);
-            InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null);
+            InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1);
             SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null);
             AtomicReference reference = new AtomicReference<>();
             ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, response, r ->
@@ -132,7 +132,7 @@ public void testFailOneItemFailsEntirePhase() throws IOException {
             void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) {
                 assertTrue(executedMultiSearch.compareAndSet(false, true));
                 InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits,
-                    null, null, null, false, null);
+                    null, null, null, false, null, 1);
                 SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null);
                 listener.onResponse(new MultiSearchResponse(new MultiSearchResponse.Item[]{
                     new MultiSearchResponse.Item(null, new RuntimeException("boom")),
@@ -146,7 +146,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL
             new SearchHit(2, "ID2", new Text("type"),
                 Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(collapseValue))))}, 1,
             1.0F);
-        InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null);
+        InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1);
         SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null);
         AtomicReference reference = new AtomicReference<>();
         ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, response, r ->
@@ -180,7 +180,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL
             Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(null)))),
             new SearchHit(2, "ID2", new Text("type"),
                 Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(null))))}, 1, 1.0F);
-        InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null);
+        InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1);
         SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null);
         AtomicReference reference = new AtomicReference<>();
         ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, response, r ->
diff --git a/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
index 67a67f720e14a..14c2eb6f63fd2 100644
--- a/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
@@ -46,7 +46,10 @@
 public class FetchSearchPhaseTests extends ESTestCase {
 
     public void testShortcutQueryAndFetchOptimization() throws IOException {
-        AtomicArray results = new AtomicArray<>(1);
+        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
+        InitialSearchPhase.SearchPhaseResults results =
+            controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 1);
         AtomicReference responseRef = new AtomicReference<>();
         boolean hasHits = randomBoolean();
         final int numHits;
@@ -56,14 +59,12 @@ public void testShortcutQueryAndFetchOptimization() throws IOException {
             queryResult.size(1);
             FetchSearchResult fetchResult = new FetchSearchResult();
             fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(42)}, 1, 1.0F));
-            results.set(0, new QueryFetchSearchResult(queryResult, fetchResult));
+            results.consumeResult(0, new QueryFetchSearchResult(queryResult, fetchResult));
             numHits = 1;
         } else {
             numHits = 0;
         }
 
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
-        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
         FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext,
             (searchResponse) -> new SearchPhase("test") {
             @Override
@@ -83,20 +84,22 @@ public void run() throws IOException {
     }
 
     public void testFetchTwoDocument() throws IOException {
-        AtomicArray results = new AtomicArray<>(2);
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
+        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
+        InitialSearchPhase.SearchPhaseResults results =
+            controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = randomIntBetween(2, 10);
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
-        results.set(0, queryResult);
+        results.consumeResult(0, queryResult);
 
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
-        results.set(1, queryResult);
+        results.consumeResult(1, queryResult);
 
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
         SearchTransportService searchTransportService = new SearchTransportService(
             Settings.builder().put("search.remote.connect", false).build(), null,  null) {
             @Override
@@ -112,7 +115,6 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe
                 listener.onResponse(fetchResult);
             }
         };
-        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
         mockSearchPhaseContext.searchTransport = searchTransportService;
         FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext,
             (searchResponse) -> new SearchPhase("test") {
@@ -134,20 +136,22 @@ public void run() throws IOException {
     }
 
     public void testFailFetchOneDoc() throws IOException {
-        AtomicArray results = new AtomicArray<>(2);
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
+        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
+        InitialSearchPhase.SearchPhaseResults results =
+            controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = randomIntBetween(2, 10);
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
-        results.set(0, queryResult);
+        results.consumeResult(0, queryResult);
 
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
-        results.set(1, queryResult);
+        results.consumeResult(1, queryResult);
 
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
         SearchTransportService searchTransportService = new SearchTransportService(
             Settings.builder().put("search.remote.connect", false).build(), null,  null) {
             @Override
@@ -163,7 +167,6 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe
 
             }
         };
-        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
         mockSearchPhaseContext.searchTransport = searchTransportService;
         FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext,
             (searchResponse) -> new SearchPhase("test") {
@@ -190,15 +193,17 @@ public void testFetchDocsConcurrently() throws IOException, InterruptedException
         int resultSetSize = randomIntBetween(0, 100);
         // we use at least 2 hits otherwise this is subject to single shard optimization and we trip an assert...
         int numHits = randomIntBetween(2, 100); // also numshards --> 1 hit per shard
-        AtomicArray results = new AtomicArray<>(numHits);
+        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits);
+        InitialSearchPhase.SearchPhaseResults results =
+            controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), numHits);
         AtomicReference responseRef = new AtomicReference<>();
         for (int i = 0; i < numHits; i++) {
             QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new Index("test", "na"), 0));
             queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(i+1, i)}, i), new DocValueFormat[0]);
             queryResult.size(resultSetSize); // the size of the result set
-            results.set(i, queryResult);
+            results.consumeResult(i, queryResult);
         }
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
         SearchTransportService searchTransportService = new SearchTransportService(
             Settings.builder().put("search.remote.connect", false).build(), null,  null) {
             @Override
@@ -211,7 +216,6 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe
                 }).start();
             }
         };
-        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits);
         mockSearchPhaseContext.searchTransport = searchTransportService;
         CountDownLatch latch = new CountDownLatch(1);
         FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext,
@@ -243,20 +247,22 @@ public void run() throws IOException {
     }
 
     public void testExceptionFailsPhase() throws IOException {
-        AtomicArray results = new AtomicArray<>(2);
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
+        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
+        InitialSearchPhase.SearchPhaseResults results =
+            controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = randomIntBetween(2, 10);
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
-        results.set(0, queryResult);
+        results.consumeResult(0, queryResult);
 
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
-        results.set(1, queryResult);
+        results.consumeResult(1, queryResult);
         AtomicInteger numFetches = new AtomicInteger(0);
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
         SearchTransportService searchTransportService = new SearchTransportService(
             Settings.builder().put("search.remote.connect", false).build(), null,  null) {
             @Override
@@ -275,7 +281,6 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe
                 listener.onResponse(fetchResult);
             }
         };
-        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
         mockSearchPhaseContext.searchTransport = searchTransportService;
         FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext,
             (searchResponse) -> new SearchPhase("test") {
@@ -293,20 +298,22 @@ public void run() throws IOException {
     }
 
     public void testCleanupIrrelevantContexts() throws IOException { // contexts that are not fetched should be cleaned up
-        AtomicArray results = new AtomicArray<>(2);
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
+        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
+        InitialSearchPhase.SearchPhaseResults results =
+            controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
         AtomicReference responseRef = new AtomicReference<>();
         int resultSetSize = 1;
         QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize); // the size of the result set
-        results.set(0, queryResult);
+        results.consumeResult(0, queryResult);
 
         queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1));
         queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]);
         queryResult.size(resultSetSize);
-        results.set(1, queryResult);
+        results.consumeResult(1, queryResult);
 
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
         SearchTransportService searchTransportService = new SearchTransportService(
             Settings.builder().put("search.remote.connect", false).build(), null,  null) {
             @Override
@@ -321,7 +328,6 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe
                 listener.onResponse(fetchResult);
             }
         };
-        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
         mockSearchPhaseContext.searchTransport = searchTransportService;
         FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext,
             (searchResponse) -> new SearchPhase("test") {
diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
index 972d0957dce7c..9b7fad265bfb3 100644
--- a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
@@ -32,13 +32,11 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.internal.AliasFilter;
-import org.elasticsearch.search.internal.ShardSearchTransportRequest;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportException;
@@ -53,7 +51,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -96,7 +93,8 @@ public void sendFreeContext(Transport.Connection connection, long contextId, Sea
         lookup.put(replicaNode.getId(), new MockConnection(replicaNode));
         Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
         AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction("test", logger, transportService,
-            lookup::get, aliasFilters, Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0, null) {
+            lookup::get, aliasFilters, Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0, null,
+            new InitialSearchPhase.SearchPhaseResults<>(shardsIter.size())) {
             TestSearchResponse response = new TestSearchResponse();
 
             @Override
@@ -115,12 +113,12 @@ protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, Ac
             }
 
             @Override
-            protected SearchPhase getNextPhase(AtomicArray results, SearchPhaseContext context) {
+            protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) {
                 return new SearchPhase("test") {
                     @Override
                     public void run() throws IOException {
-                        for (int i = 0; i < results.length(); i++) {
-                            TestSearchPhaseResult result = results.get(i);
+                        for (int i = 0; i < results.getNumShards(); i++) {
+                            TestSearchPhaseResult result = results.results.get(i);
                             assertEquals(result.node.getId(), result.shardTarget().getNodeId());
                             sendReleaseSearchContext(result.id(), new MockConnection(result.node));
                         }
diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
index 1686a3c6de27d..ee68f2f1f9808 100644
--- a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
@@ -26,7 +26,12 @@
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
@@ -45,10 +50,13 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
 
 public class SearchPhaseControllerTests extends ESTestCase {
     private SearchPhaseController searchPhaseController;
@@ -65,7 +73,7 @@ public void testSort() throws Exception {
         }
         int nShards = randomIntBetween(1, 20);
         int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
-        AtomicArray results = generateQueryResults(nShards, suggestions, queryResultSize);
+        AtomicArray results = generateQueryResults(nShards, suggestions, queryResultSize, false);
         ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(true, results);
         int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results));
         for (Suggest.Suggestion suggestion : reducedSuggest(results)) {
@@ -75,6 +83,18 @@ public void testSort() throws Exception {
         assertThat(sortedDocs.length, equalTo(accumulatedLength));
     }
 
+    public void testSortIsIdempotent() throws IOException {
+        int nShards = randomIntBetween(1, 20);
+        int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
+        AtomicArray results = generateQueryResults(nShards, Collections.emptyList(), queryResultSize, true);
+        boolean ignoreFrom = randomBoolean();
+        ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(ignoreFrom, results);
+
+        ScoreDoc[] sortedDocs2 = searchPhaseController.sortDocs(ignoreFrom, results);
+        assertArrayEquals(sortedDocs, sortedDocs2);
+    }
+
     public void testMerge() throws IOException {
         List suggestions = new ArrayList<>();
         for (int i = 0; i < randomIntBetween(1, 5); i++) {
@@ -82,7 +102,7 @@ public void testMerge() throws IOException {
         }
         int nShards = randomIntBetween(1, 20);
         int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
-        AtomicArray queryResults = generateQueryResults(nShards, suggestions, queryResultSize);
+        AtomicArray queryResults = generateQueryResults(nShards, suggestions, queryResultSize, false);
 
         // calculate offsets and score doc array
         List mergedScoreDocs = new ArrayList<>();
@@ -119,7 +139,7 @@ public void testMerge() throws IOException {
 
     private AtomicArray generateQueryResults(int nShards,
                                                                         List suggestions,
-                                                                        int searchHitsSize) {
+                                                                        int searchHitsSize, boolean useConstantScore) {
         AtomicArray queryResults = new AtomicArray<>(nShards);
         for (int shardIndex = 0; shardIndex < nShards; shardIndex++) {
             QuerySearchResult querySearchResult = new QuerySearchResult(shardIndex,
@@ -130,7 +150,7 @@ private AtomicArray generateQueryResults(int nShards,
                 ScoreDoc[] scoreDocs = new ScoreDoc[nDocs];
                 float maxScore = 0F;
                 for (int i = 0; i < nDocs; i++) {
-                    float score = Math.abs(randomFloat());
+                    float score = useConstantScore ? 1.0F : Math.abs(randomFloat());
                     scoreDocs[i] = new ScoreDoc(i, score);
                     if (score > maxScore) {
                         maxScore = score;
@@ -230,4 +250,101 @@ private AtomicArray generateFetchResults(int nShards,
         }
         return fetchResults;
     }
+
+    public void testConsumer() {
+        int bufferSize = randomIntBetween(2, 3);
+        SearchRequest request = new SearchRequest();
+        request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")));
+        request.setBatchedReduceSize(bufferSize);
+        InitialSearchPhase.SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3);
+        QuerySearchResult result = new QuerySearchResult(0, new SearchShardTarget("node", new Index("a", "b"), 0));
+        result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+        InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 1.0D, DocValueFormat.RAW,
+            Collections.emptyList(), Collections.emptyMap())));
+        result.aggregations(aggs);
+        consumer.consumeResult(0, result);
+
+        result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0));
+        result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+        aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 3.0D, DocValueFormat.RAW,
+            Collections.emptyList(), Collections.emptyMap())));
+        result.aggregations(aggs);
+        consumer.consumeResult(2, result);
+
+        result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0));
+        result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+        aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 2.0D, DocValueFormat.RAW,
+            Collections.emptyList(), Collections.emptyMap())));
+        result.aggregations(aggs);
+        consumer.consumeResult(1, result);
+        int numTotalReducePhases = 1;
+        if (bufferSize == 2) {
+            assertThat(consumer, instanceOf(SearchPhaseController.QueryPhaseResultConsumer.class));
+            assertEquals(1, ((SearchPhaseController.QueryPhaseResultConsumer)consumer).getNumReducePhases());
+            assertEquals(2, ((SearchPhaseController.QueryPhaseResultConsumer)consumer).getNumBuffered());
+            numTotalReducePhases++;
+        } else {
+            assertThat(consumer, not(instanceOf(SearchPhaseController.QueryPhaseResultConsumer.class)));
+        }
+
+        SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
+        assertEquals(numTotalReducePhases, reduce.numReducePhases);
+        InternalMax max = (InternalMax) reduce.aggregations.asList().get(0);
+        assertEquals(3.0D, max.getValue(), 0.0D);
+    }
+
+    public void testConsumerConcurrently() throws InterruptedException {
+        int expectedNumResults = randomIntBetween(1, 100);
+        int bufferSize = randomIntBetween(2, 200);
+
+        SearchRequest request = new SearchRequest();
+        request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")));
+        request.setBatchedReduceSize(bufferSize);
+        InitialSearchPhase.SearchPhaseResults consumer =
+            searchPhaseController.newSearchPhaseResults(request, expectedNumResults);
+        AtomicInteger max = new AtomicInteger();
+        CountDownLatch latch = new CountDownLatch(expectedNumResults);
+        for (int i = 0; i < expectedNumResults; i++) {
+            int id = i;
+            Thread t = new Thread(() -> {
+                int number = randomIntBetween(1, 1000);
+                max.updateAndGet(prev -> Math.max(prev, number));
+                QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id));
+                result.topDocs(new TopDocs(id, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
+                InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number,
+                    DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap())));
+                result.aggregations(aggs);
+                consumer.consumeResult(id, result);
+                latch.countDown();
+
+            });
+            t.start();
+        }
+        latch.await();
+        SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
+        InternalMax internalMax = (InternalMax) reduce.aggregations.asList().get(0);
+        assertEquals(max.get(), internalMax.getValue(), 0.0D);
+    }
+
+    public void testNewSearchPhaseResults() {
+        for (int i = 0; i < 10; i++) {
+            int expectedNumResults = randomIntBetween(1, 10);
+            int bufferSize = randomIntBetween(2, 10);
+            SearchRequest request = new SearchRequest();
+            final boolean hasAggs;
+            if ((hasAggs = randomBoolean())) {
+                request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")));
+            }
+            request.setBatchedReduceSize(bufferSize);
+            InitialSearchPhase.SearchPhaseResults consumer
+                = searchPhaseController.newSearchPhaseResults(request, expectedNumResults);
+            if (hasAggs && expectedNumResults > bufferSize) {
+                assertThat("expectedNumResults: " + expectedNumResults + " bufferSize: " + bufferSize,
+                    consumer, instanceOf(SearchPhaseController.QueryPhaseResultConsumer.class));
+            } else {
+                assertThat("expectedNumResults: " + expectedNumResults + " bufferSize: " + bufferSize,
+                    consumer, not(instanceOf(SearchPhaseController.QueryPhaseResultConsumer.class)));
+            }
+        }
+    }
 }
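
The new consumer tests exercise the batched reduce: when aggregations are present and there are more shard results than the configured buffer size, partial results are folded together incrementally, and each partial fold counts as an additional reduce phase. A minimal sketch of that buffering idea for a max aggregation, independent of the SearchPhaseController internals (class and method names are illustrative):

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative only: reduce shard results in fixed-size batches and count the reduce rounds.
public class BatchedMaxReducer {

    private final int bufferSize;
    private final List<Double> buffer = new ArrayList<>();
    private int numReducePhases = 1; // the final reduce always counts as one phase

    BatchedMaxReducer(int bufferSize) {
        this.bufferSize = bufferSize;
    }

    // Called once per shard result, possibly from multiple threads.
    synchronized void consume(double shardMax) {
        if (buffer.size() == bufferSize) {
            // buffer is full: collapse it into a single partial result and count a reduce phase
            double partial = buffer.stream().mapToDouble(Double::doubleValue).max().getAsDouble();
            buffer.clear();
            buffer.add(partial);
            numReducePhases++;
        }
        buffer.add(shardMax);
    }

    synchronized double reduce() {
        return buffer.stream().mapToDouble(Double::doubleValue).max().getAsDouble();
    }

    synchronized int getNumReducePhases() {
        return numReducePhases;
    }

    public static void main(String[] args) {
        BatchedMaxReducer reducer = new BatchedMaxReducer(2);
        reducer.consume(1.0);
        reducer.consume(3.0);
        reducer.consume(2.0);                              // triggers one partial fold of {1.0, 3.0}
        System.out.println(reducer.reduce());              // 3.0
        System.out.println(reducer.getNumReducePhases());  // 2: one partial fold plus the final reduce
    }
}
```

With a buffer of 2 and 3 shard results the sketch reports 2 reduce phases, one partial plus the final one, which mirrors the expectation in testConsumer above.
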
diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
index e1133542e4102..4b11697c16d9f 100644
--- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
@@ -22,10 +22,13 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.get.GetResult;
@@ -38,6 +41,7 @@
 import org.elasticsearch.script.ScriptSettings;
 import org.elasticsearch.script.ScriptType;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.RandomObjects;
 import org.elasticsearch.watcher.ResourceWatcherService;
 
 import java.io.IOException;
@@ -48,13 +52,16 @@
 import java.util.function.Function;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
 import static org.hamcrest.Matchers.arrayContaining;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.notNullValue;
 
 public class UpdateRequestTests extends ESTestCase {
-    public void testUpdateRequest() throws Exception {
+
+    public void testFromXContent() throws Exception {
         UpdateRequest request = new UpdateRequest("test", "type", "1");
         // simple script
         request.fromXContent(createParser(XContentFactory.jsonBuilder()
@@ -314,4 +321,83 @@ public void testNowInScript() throws IOException {
             assertThat(action, instanceOf(IndexRequest.class));
         }
     }
+
+    public void testToAndFromXContent() throws IOException {
+        UpdateRequest updateRequest = new UpdateRequest();
+        updateRequest.detectNoop(randomBoolean());
+
+        if (randomBoolean()) {
+            XContentType xContentType = randomFrom(XContentType.values());
+            BytesReference source = RandomObjects.randomSource(random(), xContentType);
+            updateRequest.doc(new IndexRequest().source(source, xContentType));
+            updateRequest.docAsUpsert(randomBoolean());
+        } else {
+            ScriptType scriptType = randomFrom(ScriptType.values());
+            String scriptLang = (scriptType != ScriptType.STORED) ? randomAsciiOfLength(10) : null;
+            String scriptIdOrCode = randomAsciiOfLength(10);
+            int nbScriptParams = randomIntBetween(0, 5);
+            Map scriptParams = new HashMap<>(nbScriptParams);
+            for (int i = 0; i < nbScriptParams; i++) {
+                scriptParams.put(randomAsciiOfLength(5), randomAsciiOfLength(5));
+            }
+            updateRequest.script(new Script(scriptType, scriptLang, scriptIdOrCode, scriptParams));
+            updateRequest.scriptedUpsert(randomBoolean());
+        }
+        if (randomBoolean()) {
+            XContentType xContentType = randomFrom(XContentType.values());
+            BytesReference source = RandomObjects.randomSource(random(), xContentType);
+            updateRequest.upsert(new IndexRequest().source(source, xContentType));
+        }
+        if (randomBoolean()) {
+            String[] fields = new String[randomIntBetween(0, 5)];
+            for (int i = 0; i < fields.length; i++) {
+                fields[i] = randomAsciiOfLength(5);
+            }
+            updateRequest.fields(fields);
+        }
+        if (randomBoolean()) {
+            if (randomBoolean()) {
+                updateRequest.fetchSource(randomBoolean());
+            } else {
+                String[] includes = new String[randomIntBetween(0, 5)];
+                for (int i = 0; i < includes.length; i++) {
+                    includes[i] = randomAsciiOfLength(5);
+                }
+                String[] excludes = new String[randomIntBetween(0, 5)];
+                for (int i = 0; i < excludes.length; i++) {
+                    excludes[i] = randomAsciiOfLength(5);
+                }
+                if (randomBoolean()) {
+                    updateRequest.fetchSource(includes, excludes);
+                }
+            }
+        }
+
+        XContentType xContentType = randomFrom(XContentType.values());
+        boolean humanReadable = randomBoolean();
+        BytesReference originalBytes = XContentHelper.toXContent(updateRequest, xContentType, humanReadable);
+
+        if (randomBoolean()) {
+            try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
+                originalBytes = shuffleXContent(parser, randomBoolean()).bytes();
+            }
+        }
+
+        UpdateRequest parsedUpdateRequest = new UpdateRequest();
+        try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
+            parsedUpdateRequest.fromXContent(parser);
+            assertNull(parser.nextToken());
+        }
+
+        assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop());
+        assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert());
+        assertEquals(updateRequest.script(), parsedUpdateRequest.script());
+        assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert());
+        assertArrayEquals(updateRequest.fields(), parsedUpdateRequest.fields());
+        assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
+
+        BytesReference finalBytes = toXContent(parsedUpdateRequest, xContentType, humanReadable);
+        assertToXContentEquivalent(originalBytes, finalBytes, xContentType);
+    }
 }
diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/BlobPathTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/BlobPathTests.java
index 09225914644e0..ec846ca30690e 100644
--- a/core/src/test/java/org/elasticsearch/common/blobstore/BlobPathTests.java
+++ b/core/src/test/java/org/elasticsearch/common/blobstore/BlobPathTests.java
@@ -35,5 +35,7 @@ public void testBuildAsString() {
         path = path.add("b").add("c");
         assertThat(path.buildAsString(), is("a/b/c/"));
 
+        path = path.add("d/");
+        assertThat(path.buildAsString(), is("a/b/c/d/"));
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java
index 546d62a0e1fcb..d9fe806e53b39 100644
--- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java
+++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java
@@ -31,7 +31,7 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -136,19 +136,19 @@ public void setUp() throws Exception {
 
         // now go over each doc, build the relevant references and filter
         reader = DirectoryReader.open(iw);
-        List<Term> filterTerms = new ArrayList<>();
+        List<BytesRef> filterTerms = new ArrayList<>();
         for (int docId = 0; docId < reader.maxDoc(); docId++) {
             Document doc = reader.document(docId);
             addFreqs(doc, referenceAll);
             if (!deletedIds.contains(doc.getField("id").stringValue())) {
                 addFreqs(doc, referenceNotDeleted);
                 if (randomBoolean()) {
-                    filterTerms.add(new Term("id", doc.getField("id").stringValue()));
+                    filterTerms.add(new BytesRef(doc.getField("id").stringValue()));
                     addFreqs(doc, referenceFilter);
                 }
             }
         }
-        filter = new TermsQuery(filterTerms);
+        filter = new TermInSetQuery("id", filterTerms);
     }
 
     private void addFreqs(Document doc, Map reference) {
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 5b4a6e10ddaf2..6c7d2dd810fe9 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -3114,7 +3114,7 @@ public long generateSeqNo() {
 
     public void testSequenceNumberAdvancesToMaxSeqNoOnEngineOpenOnReplica() throws IOException {
         final long v = Versions.MATCH_ANY;
-        final VersionType t = VersionType.INTERNAL;
+        final VersionType t = VersionType.EXTERNAL;
         final long ts = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
         final int docs = randomIntBetween(1, 32);
         InternalEngine initialEngine = null;
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
index 00eecc669f8e2..5c418b7ce265e 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java
@@ -28,10 +28,11 @@
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.analysis.AnalyzerScope;
@@ -41,7 +42,9 @@
 import org.junit.Before;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 
 public class KeywordFieldTypeTests extends FieldTypeTestCase {
 
@@ -110,7 +113,10 @@ public void testTermsQuery() {
         MappedFieldType ft = createDefaultFieldType();
         ft.setName("field");
         ft.setIndexOptions(IndexOptions.DOCS);
-        assertEquals(new TermsQuery(new Term("field", "foo"), new Term("field", "bar")),
+        List<BytesRef> terms = new ArrayList<>();
+        terms.add(new BytesRef("foo"));
+        terms.add(new BytesRef("bar"));
+        assertEquals(new TermInSetQuery("field", terms),
                 ft.termsQuery(Arrays.asList("foo", "bar"), null));
 
         ft.setIndexOptions(IndexOptions.NONE);
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
index 70103111e5564..895bb97e16665 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
@@ -18,15 +18,18 @@
  */
 package org.elasticsearch.index.mapper;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.TextFieldMapper;
@@ -86,7 +89,10 @@ public void testTermsQuery() {
         MappedFieldType ft = createDefaultFieldType();
         ft.setName("field");
         ft.setIndexOptions(IndexOptions.DOCS);
-        assertEquals(new TermsQuery(new Term("field", "foo"), new Term("field", "bar")),
+        List<BytesRef> terms = new ArrayList<>();
+        terms.add(new BytesRef("foo"));
+        terms.add(new BytesRef("bar"));
+        assertEquals(new TermInSetQuery("field", terms),
                 ft.termsQuery(Arrays.asList("foo", "bar"), null));
 
         ft.setIndexOptions(IndexOptions.NONE);
diff --git a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java
index 2b6482ec5df17..e21e157bcf77b 100644
--- a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java
@@ -20,7 +20,7 @@
 package org.elasticsearch.index.query;
 
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
@@ -240,10 +240,9 @@ static void assertLateParsingQuery(Query query, String type, String id) throws I
         assertThat(booleanQuery.clauses().size(), equalTo(2));
         //check the inner ids query, we have to call rewrite to get to check the type it's executed against
         assertThat(booleanQuery.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST));
-        assertThat(booleanQuery.clauses().get(0).getQuery(), instanceOf(TermsQuery.class));
-        TermsQuery termsQuery = (TermsQuery) booleanQuery.clauses().get(0).getQuery();
-        // we need to rewrite once for TermsQuery -> TermInSetQuery and than againt TermInSetQuery -> ConstantScoreQuery
-        Query rewrittenTermsQuery = termsQuery.rewrite(null).rewrite(null);
+        assertThat(booleanQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class));
+        TermInSetQuery termsQuery = (TermInSetQuery) booleanQuery.clauses().get(0).getQuery();
+        Query rewrittenTermsQuery = termsQuery.rewrite(null);
         assertThat(rewrittenTermsQuery, instanceOf(ConstantScoreQuery.class));
         ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) rewrittenTermsQuery;
         assertThat(constantScoreQuery.getQuery(), instanceOf(BooleanQuery.class));
diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java
index ccac82362a5aa..429f43aaee573 100644
--- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java
@@ -20,7 +20,7 @@
 package org.elasticsearch.index.query;
 
 
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -76,7 +76,7 @@ protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, Se
         if (queryBuilder.ids().size() == 0) {
             assertThat(query, instanceOf(MatchNoDocsQuery.class));
         } else {
-            assertThat(query, instanceOf(TermsQuery.class));
+            assertThat(query, instanceOf(TermInSetQuery.class));
         }
     }
 
diff --git a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
index 41571d9f0901a..442075de306bc 100644
--- a/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.query;
 
-import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
@@ -110,7 +110,7 @@ protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query query,
             MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query;
             assertThat(matchNoDocsQuery.toString(), containsString("No terms supplied for \"terms\" query."));
         } else {
-            assertThat(query, either(instanceOf(TermsQuery.class))
+            assertThat(query, either(instanceOf(TermInSetQuery.class))
                     .or(instanceOf(PointInSetQuery.class))
                     .or(instanceOf(ConstantScoreQuery.class)));
             if (query instanceof ConstantScoreQuery) {
@@ -131,7 +131,7 @@ protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query query,
                 terms = queryBuilder.values();
             }
 
-            TermsQuery expected = new TermsQuery(queryBuilder.fieldName(),
+            TermInSetQuery expected = new TermInSetQuery(queryBuilder.fieldName(),
                     terms.stream().filter(Objects::nonNull).map(Object::toString).map(BytesRef::new).collect(Collectors.toList()));
             assertEquals(expected, query);
         }
diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index 0a0cddf5d8169..96f86f124dcac 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -22,8 +22,17 @@
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.bulk.BulkItemRequest;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.BulkShardRequest;
+import org.elasticsearch.action.bulk.BulkShardResponse;
+import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction;
+import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.support.PlainActionFuture;
@@ -157,10 +166,34 @@ public int appendDocs(final int numOfDoc) throws Exception {
 
         public IndexResponse index(IndexRequest indexRequest) throws Exception {
             PlainActionFuture<IndexResponse> listener = new PlainActionFuture<>();
-            new IndexingAction(indexRequest, listener, this).execute();
+            final ActionListener<BulkShardResponse> wrapBulkListener = ActionListener.wrap(
+                    bulkShardResponse -> listener.onResponse(bulkShardResponse.getResponses()[0].getResponse()),
+                    listener::onFailure);
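+            // wrap the single index request in a one-item bulk shard request so the test follows the
+            // bulk-based shard write path (see the BulkShardRequest/BulkShardResponse imports above)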
+            BulkItemRequest[] items = new BulkItemRequest[1];
+            items[0] = new TestBulkItemRequest(0, indexRequest);
+            BulkShardRequest request = new BulkShardRequest(shardId, indexRequest.getRefreshPolicy(), items);
+            new IndexingAction(request, wrapBulkListener, this).execute();
             return listener.get();
         }
 
+        /** BulkItemRequest exposing get/set primary response */
+        public class TestBulkItemRequest extends BulkItemRequest {
+
+            TestBulkItemRequest(int id, DocWriteRequest request) {
+                super(id, request);
+            }
+
+            @Override
+            protected void setPrimaryResponse(BulkItemResponse primaryResponse) {
+                super.setPrimaryResponse(primaryResponse);
+            }
+
+            @Override
+            protected BulkItemResponse getPrimaryResponse() {
+                return super.getPrimaryResponse();
+            }
+        }
+
         public synchronized void startAll() throws IOException {
             startReplicas(replicas.size());
         }
@@ -486,22 +519,28 @@ public void respond(ActionListener listener) {
 
     }
 
-    class IndexingAction extends ReplicationAction {
+    class IndexingAction extends ReplicationAction {
 
-        IndexingAction(IndexRequest request, ActionListener<IndexResponse> listener, ReplicationGroup replicationGroup) {
+        IndexingAction(BulkShardRequest request, ActionListener<BulkShardResponse> listener, ReplicationGroup replicationGroup) {
             super(request, listener, replicationGroup, "indexing");
-            request.process(null, request.index());
         }
 
         @Override
-        protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest request) throws Exception {
-            IndexResponse response = indexOnPrimary(request, primary);
-            return new PrimaryResult(request, response);
+        protected PrimaryResult performOnPrimary(IndexShard primary, BulkShardRequest request) throws Exception {
+            final IndexRequest indexRequest = (IndexRequest) request.items()[0].request();
+            indexRequest.process(null, request.index());
+            final IndexResponse indexResponse = indexOnPrimary(indexRequest, primary);
+            BulkItemResponse[] itemResponses = new BulkItemResponse[1];
+            itemResponses[0] = new BulkItemResponse(0, indexRequest.opType(), indexResponse);
+            ((ReplicationGroup.TestBulkItemRequest) request.items()[0]).setPrimaryResponse(itemResponses[0]);
+            return new PrimaryResult(request, new BulkShardResponse(primary.shardId(), itemResponses));
         }
 
         @Override
-        protected void performOnReplica(IndexRequest request, IndexShard replica) throws IOException {
-            indexOnReplica(request, replica);
+        protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws IOException {
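+            // apply the operation on the replica using the version/seq# captured in the primary's response,
+            // rather than relying on a request that was mutated in place on the primary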
+            final ReplicationGroup.TestBulkItemRequest bulkItemRequest = ((ReplicationGroup.TestBulkItemRequest) request.items()[0]);
+            final DocWriteResponse primaryResponse = bulkItemRequest.getPrimaryResponse().getResponse();
+            indexOnReplica(primaryResponse, ((IndexRequest) bulkItemRequest.request()), replica);
         }
     }
 
@@ -511,14 +550,6 @@ protected void performOnReplica(IndexRequest request, IndexShard replica) throws
     protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary) throws Exception {
         final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary,
                 null);
-        if (indexResult.hasFailure() == false) {
-            // update the version on request so it will happen on the replicas
-            final long version = indexResult.getVersion();
-            request.version(version);
-            request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
-            request.setSeqNo(indexResult.getSeqNo());
-            assert request.versionType().validateVersionForWrites(request.version());
-        }
         request.primaryTerm(primary.getPrimaryTerm());
         TransportWriteActionTestHelper.performPostWriteActions(primary, request, indexResult.getTranslogLocation(), logger);
         return new IndexResponse(
@@ -533,8 +564,8 @@ protected IndexResponse indexOnPrimary(IndexRequest request, IndexShard primary)
     /**
      * indexes the given requests on the supplied replica shard
      */
-    protected void indexOnReplica(IndexRequest request, IndexShard replica) throws IOException {
-        final Engine.IndexResult result = executeIndexRequestOnReplica(request, replica);
+    protected void indexOnReplica(DocWriteResponse response, IndexRequest request, IndexShard replica) throws IOException {
+        final Engine.IndexResult result = executeIndexRequestOnReplica(response, request, replica);
         TransportWriteActionTestHelper.performPostWriteActions(replica, request, result.getTranslogLocation(), logger);
     }
 
diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index 3928f78c3c536..97e224f04a4e3 100644
--- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.index.engine.Engine;
@@ -166,9 +167,9 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception {
                 logger.info("--> indexing {} rollback docs", rollbackDocs);
                 for (int i = 0; i < rollbackDocs; i++) {
                     final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "rollback_" + i)
-                        .source("{}", XContentType.JSON);
-                    indexOnPrimary(indexRequest, oldPrimary);
-                    indexOnReplica(indexRequest, replica);
+                            .source("{}", XContentType.JSON);
+                    final IndexResponse primaryResponse = indexOnPrimary(indexRequest, oldPrimary);
+                    indexOnReplica(primaryResponse, indexRequest, replica);
                 }
                 if (randomBoolean()) {
                     oldPrimary.flush(new FlushRequest(index.getName()));
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
index 59c29a5b6a0e8..df32324987cab 100644
--- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
@@ -142,12 +142,16 @@ public void testIndexCleanup() throws Exception {
             transportServiceNode3.addTracer(new ReclocationStartEndTracer(logger, beginRelocationLatch, endRelocationLatch));
             internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_3)).get();
             // wait for relocation to start
+            logger.info("--> waiting for relocation to start");
             beginRelocationLatch.await();
+            logger.info("--> starting disruption");
             disruption.startDisrupting();
             // wait for relocation to finish
+            logger.info("--> waiting for relocation to finish");
             endRelocationLatch.await();
             // wait a little so that cluster state observer is registered
             sleep(50);
+            logger.info("--> stopping disruption");
             disruption.stopDisrupting();
         } else {
             internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_3)).get();
diff --git a/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
index 007b6ce1fc776..3d4903e04724d 100644
--- a/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
+++ b/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java
@@ -40,6 +40,7 @@
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.isEmptyOrNullString;
+import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.not;
 
@@ -91,6 +92,29 @@ public void testFsInfo() throws IOException {
         }
     }
 
+    public void testFsInfoOverflow() throws Exception {
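+        // FsInfo.Path#add should guard against long overflow: the combined totals must never turn negative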
+        FsInfo.Path pathStats = new FsInfo.Path("/foo/bar", null,
+                randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
+
+        // While not overflowing, keep adding
+        FsInfo.Path pathToAdd = new FsInfo.Path("/foo/baz", null,
+                randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
+        while ((pathStats.total + pathToAdd.total) > 0) {
+            // Add itself as a path, to increase the total bytes until it overflows
+            logger.info("--> adding {} bytes to {}, will be: {}", pathToAdd.total, pathStats.total, pathToAdd.total + pathStats.total);
+            pathStats.add(pathToAdd);
+            pathToAdd = new FsInfo.Path("/foo/baz", null,
+                randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
+        }
+
+        logger.info("--> adding {} bytes to {}, will be: {}", pathToAdd.total, pathStats.total, pathToAdd.total + pathStats.total);
+        assertThat(pathStats.total + pathToAdd.total, lessThan(0L));
+        pathStats.add(pathToAdd);
+
+        // Even after overflowing, it should not be negative
+        assertThat(pathStats.total, greaterThan(0L));
+    }
+
     public void testIoStats() {
         final AtomicReference<List<FsInfo.DeviceStats>> diskStats = new AtomicReference<>();
         diskStats.set(Arrays.asList(
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptTests.java b/core/src/test/java/org/elasticsearch/script/ScriptTests.java
index fc841bd16486b..70c5af00f89c5 100644
--- a/core/src/test/java/org/elasticsearch/script/ScriptTests.java
+++ b/core/src/test/java/org/elasticsearch/script/ScriptTests.java
@@ -22,8 +22,8 @@
 import org.elasticsearch.common.io.stream.InputStreamStreamInput;
 import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.ESTestCase;
@@ -39,9 +39,8 @@
 public class ScriptTests extends ESTestCase {
 
     public void testScriptParsing() throws IOException {
-        XContent xContent = randomFrom(XContentType.JSON, XContentType.YAML).xContent();
-        Script expectedScript = createScript(xContent);
-        try (XContentBuilder builder = XContentBuilder.builder(xContent)) {
+        Script expectedScript = createScript();
+        try (XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()))) {
             expectedScript.toXContent(builder, ToXContent.EMPTY_PARAMS);
             try (XContentParser parser = createParser(builder)) {
                 Script actualScript = Script.parse(parser);
@@ -51,8 +50,7 @@ public void testScriptParsing() throws IOException {
     }
 
     public void testScriptSerialization() throws IOException {
-        XContent xContent = randomFrom(XContentType.JSON, XContentType.YAML).xContent();
-        Script expectedScript = createScript(xContent);
+        Script expectedScript = createScript();
         try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
             expectedScript.writeTo(new OutputStreamStreamOutput(out));
             try (ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray())) {
@@ -62,12 +60,12 @@ public void testScriptSerialization() throws IOException {
         }
     }
 
-    private Script createScript(XContent xContent) throws IOException {
+    private Script createScript() throws IOException {
         final Map<String, Object> params = randomBoolean() ? Collections.emptyMap() : Collections.singletonMap("key", "value");
         ScriptType scriptType = randomFrom(ScriptType.values());
         String script;
         if (scriptType == ScriptType.INLINE) {
-            try (XContentBuilder builder = XContentBuilder.builder(xContent)) {
+            try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                 builder.startObject();
                 builder.field("field", randomAsciiOfLengthBetween(1, 5));
                 builder.endObject();
@@ -80,8 +78,8 @@ private Script createScript(XContent xContent) throws IOException {
             scriptType,
             scriptType == ScriptType.STORED ? null : randomFrom("_lang1", "_lang2", "_lang3"),
             script,
-            scriptType == ScriptType.INLINE ? Collections.singletonMap(Script.CONTENT_TYPE_OPTION, xContent.type().mediaType()) : null,
-            params
+            scriptType == ScriptType.INLINE ?
+                    Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()) : null, params
         );
     }
 
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java
index aab44c32fcf0d..a59a62deb5823 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java
@@ -29,12 +29,13 @@
 import org.apache.lucene.search.Weight;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.cache.query.DisabledQueryCache;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -56,6 +57,8 @@
 import java.util.Collections;
 import java.util.List;
 
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -65,6 +68,8 @@
  * {@link AggregationBuilder} instance.
  */
 public abstract class AggregatorTestCase extends ESTestCase {
+    private List<Releasable> releasables = new ArrayList<>();
+
     protected <A extends Aggregator, B extends AggregationBuilder> A createAggregator(B aggregationBuilder,
                                                                                       IndexSearcher indexSearcher,
                                                                                       MappedFieldType... fieldTypes) throws IOException {
@@ -99,6 +104,12 @@ public boolean shouldCache(Query query) throws IOException {
         when(searchContext.bigArrays()).thenReturn(new MockBigArrays(Settings.EMPTY, circuitBreakerService));
         when(searchContext.fetchPhase())
             .thenReturn(new FetchPhase(Arrays.asList(new FetchSourceSubPhase(), new DocValueFieldsFetchSubPhase())));
+        doAnswer(invocation -> {
+            /* Store the releasables so we can release them at the end of the test case. This is important because aggregations don't
+             * close their sub-aggregations. This is fairly similar to what the production code does. */
+            releasables.add((Releasable) invocation.getArguments()[0]);
+            return null;
+        }).when(searchContext).addReleasable(anyObject(), anyObject());
 
         // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests:
         MapperService mapperService = mock(MapperService.class);
@@ -110,10 +121,9 @@ public boolean shouldCache(Query query) throws IOException {
 
         QueryShardContext queryShardContext = mock(QueryShardContext.class);
         for (MappedFieldType fieldType : fieldTypes) {
-            IndexFieldData fieldData = fieldType.fielddataBuilder().build(indexSettings, fieldType,
-                new IndexFieldDataCache.None(), circuitBreakerService, mock(MapperService.class));
             when(queryShardContext.fieldMapper(fieldType.name())).thenReturn(fieldType);
-            when(queryShardContext.getForField(fieldType)).thenReturn(fieldData);
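+            // build the fielddata on demand for every call instead of reusing a single pre-built instance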
+            when(queryShardContext.getForField(fieldType)).then(invocation -> fieldType.fielddataBuilder().build(
+                    indexSettings, fieldType, new IndexFieldDataCache.None(), circuitBreakerService, mock(MapperService.class)));
             when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
         }
 
@@ -126,13 +136,17 @@ protected  A search(IndexSe
                                                                              Query query,
                                                                              AggregationBuilder builder,
                                                                              MappedFieldType... fieldTypes) throws IOException {
-        try (C a = createAggregator(builder, searcher, fieldTypes)) {
+        C a = createAggregator(builder, searcher, fieldTypes);
+        try {
             a.preCollection();
             searcher.search(query, a);
             a.postCollection();
             @SuppressWarnings("unchecked")
             A internalAgg = (A) a.buildAggregation(0L);
             return internalAgg;
+        } finally {
+            Releasables.close(releasables);
+            releasables.clear();
         }
     }
 
@@ -168,31 +182,31 @@ protected  A searchAndReduc
         try {
             for (ShardSearcher subSearcher : subSearchers) {
                 C a = createAggregator(builder, subSearcher, fieldTypes);
-                try {
-                    a.preCollection();
-                    subSearcher.search(weight, a);
-                    a.postCollection();
-                    aggs.add(a.buildAggregation(0L));
-                } finally {
-                    closeAgg(a);
-                }
+                a.preCollection();
+                subSearcher.search(weight, a);
+                a.postCollection();
+                aggs.add(a.buildAggregation(0L));
             }
             if (aggs.isEmpty()) {
                 return null;
             } else {
+                if (randomBoolean()) {
+                    // sometimes do an incremental reduce
+                    List<InternalAggregation> internalAggregations = randomSubsetOf(randomIntBetween(1, aggs.size()), aggs);
+                    A internalAgg = (A) aggs.get(0).doReduce(internalAggregations,
+                        new InternalAggregation.ReduceContext(root.context().bigArrays(), null, false));
+                    aggs.removeAll(internalAggregations);
+                    aggs.add(internalAgg);
+                }
+                // now do the final reduce
                 @SuppressWarnings("unchecked")
-                A internalAgg = (A) aggs.get(0).doReduce(aggs, new InternalAggregation.ReduceContext(root.context().bigArrays(), null));
+                A internalAgg = (A) aggs.get(0).doReduce(aggs, new InternalAggregation.ReduceContext(root.context().bigArrays(), null,
+                    true));
                 return internalAgg;
             }
         } finally {
-            closeAgg(root);
-        }
-    }
-
-    private void closeAgg(Aggregator agg) {
-        agg.close();
-        for (Aggregator sub : ((AggregatorBase) agg).subAggregators) {
-            closeAgg(sub);
+            Releasables.close(releasables);
+            releasables.clear();
         }
     }
 
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java
index 9ea06f3086fac..4e2ab0188c98e 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java
@@ -57,8 +57,18 @@ public final void testReduceRandom() {
             inputs.add(t);
             toReduce.add(t);
         }
+        if (randomBoolean()) {
+            // we leave at least one in the list
+            List<InternalAggregation> internalAggregations = randomSubsetOf(randomIntBetween(1, toReduceSize), toReduce);
+            InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, null, false);
+            @SuppressWarnings("unchecked")
+            T reduced = (T) inputs.get(0).reduce(internalAggregations, context);
+            toReduce.removeAll(internalAggregations);
+            toReduce.add(reduced);
+        }
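+        // the last reduction is the final one; the boolean flag on ReduceContext distinguishes it from the partial reduce above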
+        InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, null, true);
         @SuppressWarnings("unchecked")
-        T reduced = (T) inputs.get(0).reduce(toReduce, null);
+        T reduced = (T) inputs.get(0).reduce(toReduce, context);
         assertReduced(reduced, inputs);
     }
 
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java
index 925ff86232a76..7307b756e3f1f 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java
@@ -170,9 +170,9 @@ private void assertSubset(Terms terms1, Terms terms2, long minDocCount, int size
                 if (size2++ == size) {
                     break;
                 }
-                assertTrue(it2.hasNext());
+                assertTrue("minDocCount: " + minDocCount, it2.hasNext());
                 final Terms.Bucket bucket2 = it2.next();
-                assertEquals(bucket1.getDocCount(), bucket2.getDocCount());
+                assertEquals("minDocCount: " + minDocCount, bucket1.getDocCount(), bucket2.getDocCount());
             }
         }
         assertFalse(it2.hasNext());
@@ -336,24 +336,8 @@ private void testMinDocCountOnTerms(String field, Script script, Terms.Order ord
                             .shardSize(cardinality + randomInt(10))
                             .minDocCount(minDocCount)).request();
             final SearchResponse response = client().search(request).get();
-            try {
-                assertAllSuccessful(response);
-                assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
-            } catch (AssertionError ae) {
-                if (!retry) {
-                    throw ae;
-                }
-                logger.info("test failed. trying to see if it recovers after 1m.", ae);
-                try {
-                    Thread.sleep(60000);
-                    logger.debug("1m passed. retrying.");
-                    testMinDocCountOnTerms(field, script, order, include, false);
-                } catch (Exception secondFailure) {
-                    secondFailure.addSuppressed(ae);
-                    logger.error("exception on retry (will re-throw the original in a sec)", secondFailure);
-                }
-                throw ae;
-            }
+            assertAllSuccessful(response);
+            assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
         }
     }
 
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java
index f2a030ab5e8ad..3f2163f25d927 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java
@@ -20,7 +20,10 @@
 package org.elasticsearch.search.aggregations.bucket;
 
 import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.FilterClient;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -30,6 +33,7 @@
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.client.RandomizingClient;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -53,12 +57,43 @@ public class TermsDocCountErrorIT extends ESIntegTestCase {
     private static final String LONG_FIELD_NAME = "l_value";
     private static final String DOUBLE_FIELD_NAME = "d_value";
 
+
     public static String randomExecutionHint() {
         return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString();
     }
 
     private static int numRoutingValues;
 
+    public static Client client() {
+        Client client = ESIntegTestCase.client();
+        if (client instanceof RandomizingClient) {
+            return new FilterClient(client) {
+                /* this test doesn't work with multiple reduce phases since:
+                 * the error for a term is the sum of the errors across all aggs that need to be reduced.
+                 * if the term is in the aggregation, then we just use the associated error, but if it is not, we need to use the
+                 * aggregation-level error, i.e. the maximum count that a doc that is not in the top list could have.
+                 *
+                 * the problem is that the logic we have today assumes there is a single reduce. So for instance for the agg-level error
+                 * it takes the count of the last term. This is correct if the agg was produced on a shard: if it had a greater count
+                 * then it would be in the top list. However, if we are on an intermediate reduce, this does not work anymore.
+                 *
+                 * Another assumption that does not hold is that right now if a term is present in an agg, we assume its count is accurate.
+                 * Again this is true if the agg was produced on a shard, but not if this is the result of an intermediate reduce.
+                 *
+                 * try with this seed and remove the setBatchedReduceSize call below
+                 *  -Dtests.seed=B32081B1E8589AE5 -Dtests.class=org.elasticsearch.search.aggregations.bucket.TermsDocCountErrorIT
+                 *  -Dtests.method="testDoubleValueField" -Dtests.locale=lv -Dtests.timezone=WET
+                 * This will be addressed in a followup to #23253
+                 */
+                @Override
+                public SearchRequestBuilder prepareSearch(String... indices) {
+                    return this.in.prepareSearch(indices).setBatchedReduceSize(512);
+                }
+            };
+        }
+        return client;
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         assertAcked(client().admin().indices().prepareCreate("idx")
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java
new file mode 100644
index 0000000000000..900db1399955d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.sampler;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.index.analysis.AnalyzerScope;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
+
+import java.io.IOException;
+
+public class SamplerAggregatorTests extends AggregatorTestCase {
+    /**
+     * Uses the sampler aggregation to find the minimum value of a field out of the top 3 scoring documents in a search.
+     */
+    public void testSampler() throws IOException {
+        TextFieldType textFieldType = new TextFieldType();
+        textFieldType.setIndexAnalyzer(new NamedAnalyzer("foo", AnalyzerScope.GLOBAL, new StandardAnalyzer()));
+        MappedFieldType numericFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        numericFieldType.setName("int");
+
+        IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+        indexWriterConfig.setMaxBufferedDocs(100);
+        indexWriterConfig.setRAMBufferSizeMB(100); // flush on open to have a single segment with predictable docIds
+        try (Directory dir = newDirectory();
+                IndexWriter w = new IndexWriter(dir, indexWriterConfig)) {
+            for (long value : new long[] {7, 3, -10, -6, 5, 50}) {
+                Document doc = new Document();
+                StringBuilder text = new StringBuilder();
+                for (int i = 0; i < value; i++) {
+                    text.append("good ");
+                }
+                doc.add(new Field("text", text.toString(), textFieldType));
+                doc.add(new SortedNumericDocValuesField("int", value));
+                w.addDocument(doc);
+            }
+
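+            // shardSize(3) keeps the top 3 scoring docs; the docs repeating "good" 50, 7 and 5 times score highest,
+            // so the expected minimum of the "int" field within the sample is 5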
+            SamplerAggregationBuilder aggBuilder = new SamplerAggregationBuilder("sampler")
+                    .shardSize(3)
+                    .subAggregation(new MinAggregationBuilder("min")
+                            .field("int"));
+            try (IndexReader reader = DirectoryReader.open(w)) {
+                assertEquals("test expects a single segment", 1, reader.leaves().size());
+                IndexSearcher searcher = new IndexSearcher(reader);
+                Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "good")), aggBuilder, textFieldType,
+                        numericFieldType);
+                Min min = sampler.getAggregations().get("min");
+                assertEquals(5.0, min.getValue(), 0);
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
index e896e1cc1fb82..2dc208d89fb44 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
@@ -146,7 +146,8 @@ SignificanceHeuristic getRandomSignificanceheuristic() {
 
     public void testReduce() {
         List<InternalAggregation> aggs = createInternalAggregations();
-        SignificantTerms reducedAgg = (SignificantTerms) aggs.get(0).doReduce(aggs, null);
+        InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(null, null, true);
+        SignificantTerms reducedAgg = (SignificantTerms) aggs.get(0).doReduce(aggs, context);
         assertThat(reducedAgg.getBuckets().size(), equalTo(2));
         assertThat(reducedAgg.getBuckets().get(0).getSubsetDf(), equalTo(8L));
         assertThat(reducedAgg.getBuckets().get(0).getSubsetSize(), equalTo(16L));
@@ -264,7 +265,7 @@ protected void checkParseException(ParseFieldRegistry significanceHeuristicParserRegistry,
             String heuristicString) throws IOException {
-        XContentParser stParser = createParser(JsonXContent.jsonXContent, 
+        XContentParser stParser = createParser(JsonXContent.jsonXContent,
                 "{\"field\":\"text\", " + heuristicString + ", \"min_doc_count\":200}");
         return parseSignificanceHeuristic(significanceHeuristicParserRegistry, stParser);
     }
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
index cb3165f2beda6..f2977fd769205 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
@@ -131,7 +131,7 @@ public void testMixLongAndDouble() throws Exception {
             }
             InternalAggregation.ReduceContext ctx =
                 new InternalAggregation.ReduceContext(new MockBigArrays(Settings.EMPTY,
-                    new NoneCircuitBreakerService()), null);
+                    new NoneCircuitBreakerService()), null, true);
             for (InternalAggregation internalAgg : aggs) {
                 InternalAggregation mergedAggs = internalAgg.doReduce(aggs, ctx);
                 assertTrue(mergedAggs instanceof DoubleTerms);
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java
new file mode 100644
index 0000000000000..10b306ad7177c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats;
+
+import java.io.IOException;
+import java.util.function.Consumer;
+
+public class ExtendedStatsAggregatorTests extends AggregatorTestCase {
+    private static final double TOLERANCE = 1e-5;
+
+    public void testEmpty() throws IOException {
+        MappedFieldType ft =
+            new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        ft.setName("field");
+        testCase(ft, iw -> {},
+            stats -> {
+                assertEquals(0d, stats.getCount(), 0);
+                assertEquals(0d, stats.getSum(), 0);
+                assertEquals(Double.NaN, stats.getAvg(), 0);
+                assertEquals(Double.POSITIVE_INFINITY, stats.getMin(), 0);
+                assertEquals(Double.NEGATIVE_INFINITY, stats.getMax(), 0);
+                assertEquals(Double.NaN, stats.getVariance(), 0);
+                assertEquals(Double.NaN, stats.getStdDeviation(), 0);
+                assertEquals(0d, stats.getSumOfSquares(), 0);
+            }
+        );
+    }
+
+    public void testRandomDoubles() throws IOException {
+        MappedFieldType ft =
+            new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
+        ft.setName("field");
+        final ExtendedSimpleStatsAggregator expected = new ExtendedSimpleStatsAggregator();
+        testCase(ft,
+            iw -> {
+                int numDocs = randomIntBetween(10, 50);
+                for (int i = 0; i < numDocs; i++) {
+                    Document doc = new Document();
+                    int numValues = randomIntBetween(1, 5);
+                    for (int j = 0; j < numValues; j++) {
+                        double value = randomDoubleBetween(-100d, 100d, true);
+                        long valueAsLong = NumericUtils.doubleToSortableLong(value);
+                        doc.add(new SortedNumericDocValuesField("field", valueAsLong));
+                        expected.add(value);
+                    }
+                    iw.addDocument(doc);
+                }
+            },
+            stats -> {
+                assertEquals(expected.count, stats.getCount(), 0);
+                assertEquals(expected.sum, stats.getSum(), TOLERANCE);
+                assertEquals(expected.min, stats.getMin(), 0);
+                assertEquals(expected.max, stats.getMax(), 0);
+                assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE);
+                assertEquals(expected.sumOfSqrs, stats.getSumOfSquares(), TOLERANCE);
+                assertEquals(expected.stdDev(), stats.getStdDeviation(), TOLERANCE);
+                assertEquals(expected.variance(), stats.getVariance(), TOLERANCE);
+                assertEquals(expected.stdDevBound(ExtendedStats.Bounds.LOWER, stats.getSigma()),
+                    stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), TOLERANCE);
+                assertEquals(expected.stdDevBound(ExtendedStats.Bounds.UPPER, stats.getSigma()),
+                    stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), TOLERANCE);
+            }
+        );
+    }
+
+    public void testRandomLongs() throws IOException {
+        MappedFieldType ft =
+            new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        ft.setName("field");
+        final ExtendedSimpleStatsAggregator expected = new ExtendedSimpleStatsAggregator();
+        testCase(ft,
+            iw -> {
+                int numDocs = randomIntBetween(10, 50);
+                for (int i = 0; i < numDocs; i++) {
+                    Document doc = new Document();
+                    int numValues = randomIntBetween(1, 5);
+                    for (int j = 0; j < numValues; j++) {
+                        long value = randomIntBetween(-100, 100);
+                        doc.add(new SortedNumericDocValuesField("field", value));
+                        expected.add(value);
+                    }
+                    iw.addDocument(doc);
+                }
+            },
+            stats -> {
+                assertEquals(expected.count, stats.getCount(), 0);
+                assertEquals(expected.sum, stats.getSum(), TOLERANCE);
+                assertEquals(expected.min, stats.getMin(), 0);
+                assertEquals(expected.max, stats.getMax(), 0);
+                assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE);
+                assertEquals(expected.sumOfSqrs, stats.getSumOfSquares(), TOLERANCE);
+                assertEquals(expected.stdDev(), stats.getStdDeviation(), TOLERANCE);
+                assertEquals(expected.variance(), stats.getVariance(), TOLERANCE);
+                assertEquals(expected.stdDevBound(ExtendedStats.Bounds.LOWER, stats.getSigma()),
+                    stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), TOLERANCE);
+                assertEquals(expected.stdDevBound(ExtendedStats.Bounds.UPPER, stats.getSigma()),
+                    stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), TOLERANCE);
+            }
+        );
+    }
+
+    public void testCase(MappedFieldType ft,
+                         CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+                         Consumer<InternalExtendedStats> verify) throws IOException {
+        try (Directory directory = newDirectory();
+             RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
+            buildIndex.accept(indexWriter);
+            try (IndexReader reader = indexWriter.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                ExtendedStatsAggregationBuilder aggBuilder = new ExtendedStatsAggregationBuilder("my_agg")
+                    .field("field")
+                    .sigma(randomDoubleBetween(0, 10, true));
+                InternalExtendedStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ft);
+                verify.accept(stats);
+            }
+        }
+    }
+
+    static class ExtendedSimpleStatsAggregator extends StatsAggregatorTests.SimpleStatsAggregator {
+        double sumOfSqrs = 0;
+
+        void add(double value) {
+            super.add(value);
+            sumOfSqrs += (value * value);
+        }
+
+        double stdDev() {
+            return Math.sqrt(variance());
+        }
+
+        double stdDevBound(ExtendedStats.Bounds bounds, double sigma) {
+            if (bounds == ExtendedStats.Bounds.UPPER) {
+                return (sum / count) + (Math.sqrt(variance()) * sigma);
+            } else {
+                return (sum / count) - (Math.sqrt(variance()) * sigma);
+            }
+        }
+
+        double variance() {
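+            // population variance computed from raw sums: E[x^2] - (E[x])^2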
+            return (sumOfSqrs - ((sum * sum) / count)) / count;
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java
new file mode 100644
index 0000000000000..83e1815f3984d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.junit.Before;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+public class InternalExtendedStatsTests extends InternalAggregationTestCase<InternalExtendedStats> {
+    private double sigma;
+
+    @Before
+    public void randomSigma() {
+        this.sigma = randomDoubleBetween(0, 10, true);
+    }
+
+    @Override
+    protected InternalExtendedStats createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
+                                                       Map<String, Object> metaData) {
+        long count = randomIntBetween(1, 50);
+        double[] minMax = new double[2];
+        minMax[0] = randomDouble();
+        minMax[1] = randomDouble();
+        double sum = randomDoubleBetween(0, 100, true);
+        return new InternalExtendedStats(name, count, sum, minMax[0], minMax[1],
+            randomDouble(), sigma, DocValueFormat.RAW,
+            pipelineAggregators, Collections.emptyMap());
+    }
+
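+    // Recompute the expected values naively from the shard-level inputs and compare them to the reduced result.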
+    @Override
+    protected void assertReduced(InternalExtendedStats reduced, List<InternalExtendedStats> inputs) {
+        long expectedCount = 0;
+        double expectedSum = 0;
+        double expectedSumOfSquare = 0;
+        double expectedMin = Double.POSITIVE_INFINITY;
+        double expectedMax = Double.NEGATIVE_INFINITY;
+        for (InternalExtendedStats stats : inputs) {
+            assertEquals(sigma, stats.getSigma(), 0);
+            expectedCount += stats.getCount();
+            if (Double.compare(stats.getMin(), expectedMin) < 0) {
+                expectedMin = stats.getMin();
+            }
+            if (Double.compare(stats.getMax(), expectedMax) > 0) {
+                expectedMax = stats.getMax();
+            }
+            expectedSum += stats.getSum();
+            expectedSumOfSquare += stats.getSumOfSquares();
+        }
+        assertEquals(sigma, reduced.getSigma(), 0);
+        assertEquals(expectedCount, reduced.getCount());
+        assertEquals(expectedSum, reduced.getSum(), 1e-10);
+        assertEquals(expectedMin, reduced.getMin(), 0d);
+        assertEquals(expectedMax, reduced.getMax(), 0d);
+        assertEquals(expectedSumOfSquare, reduced.getSumOfSquares(), 1e-10);
+    }
+
+    @Override
+    protected Writeable.Reader<InternalExtendedStats> instanceReader() {
+        return InternalExtendedStats::new;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java
new file mode 100644
index 0000000000000..db64bb8c65c76
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
+import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+public class InternalStatsTests extends InternalAggregationTestCase<InternalStats> {
+    @Override
+    protected InternalStats createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
+                                               Map<String, Object> metaData) {
+        long count = randomIntBetween(1, 50);
+        double[] minMax = new double[2];
+        minMax[0] = randomDouble();
+        minMax[1] = randomDouble();
+        double sum = randomDoubleBetween(0, 100, true);
+        return new InternalStats(name, count, sum, minMax[0], minMax[1], DocValueFormat.RAW,
+            pipelineAggregators, Collections.emptyMap());
+    }
+
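+    // Recompute count, sum, min and max naively from the shard-level inputs and compare them to the reduced result.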
+    @Override
+    protected void assertReduced(InternalStats reduced, List<InternalStats> inputs) {
+        long expectedCount = 0;
+        double expectedSum = 0;
+        double expectedMin = Double.POSITIVE_INFINITY;
+        double expectedMax = Double.NEGATIVE_INFINITY;
+        for (InternalStats stats : inputs) {
+            expectedCount += stats.getCount();
+            if (Double.compare(stats.getMin(), expectedMin) < 0) {
+                expectedMin = stats.getMin();
+            }
+            if (Double.compare(stats.getMax(), expectedMax) > 0) {
+                expectedMax = stats.getMax();
+            }
+            expectedSum += stats.getSum();
+        }
+        assertEquals(expectedCount, reduced.getCount());
+        assertEquals(expectedSum, reduced.getSum(), 1e-10);
+        assertEquals(expectedMin, reduced.getMin(), 0d);
+        assertEquals(expectedMax, reduced.getMax(), 0d);
+    }
+
+    @Override
+    protected Writeable.Reader<InternalStats> instanceReader() {
+        return InternalStats::new;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java
new file mode 100644
index 0000000000000..7286c7de0fed5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
+import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder;
+
+import java.io.IOException;
+import java.util.function.Consumer;
+
+public class StatsAggregatorTests extends AggregatorTestCase {
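+    // Tolerance for comparing the aggregator's floating-point results against the naive reference values below.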
+    static final double TOLERANCE = 1e-10;
+
+    public void testEmpty() throws IOException {
+        MappedFieldType ft =
+            new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        ft.setName("field");
+        testCase(ft, iw -> {},
+            stats -> {
+                assertEquals(0d, stats.getCount(), 0);
+                assertEquals(0d, stats.getSum(), 0);
+                assertEquals(Double.NaN, stats.getAvg(), 0);
+                assertEquals(Double.POSITIVE_INFINITY, stats.getMin(), 0);
+                assertEquals(Double.NEGATIVE_INFINITY, stats.getMax(), 0);
+            }
+        );
+    }
+
+    public void testRandomDoubles() throws IOException {
+        MappedFieldType ft =
+            new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
+        ft.setName("field");
+        final SimpleStatsAggregator expected = new SimpleStatsAggregator();
+        testCase(ft,
+            iw -> {
+                int numDocs = randomIntBetween(10, 50);
+                for (int i = 0; i < numDocs; i++) {
+                    Document doc = new Document();
+                    int numValues = randomIntBetween(1, 5);
+                    for (int j = 0; j < numValues; j++) {
+                        double value = randomDoubleBetween(-100d, 100d, true);
+                        long valueAsLong = NumericUtils.doubleToSortableLong(value);
+                        doc.add(new SortedNumericDocValuesField("field", valueAsLong));
+                        expected.add(value);
+                    }
+                    iw.addDocument(doc);
+                }
+            },
+            stats -> {
+                assertEquals(expected.count, stats.getCount(), 0);
+                assertEquals(expected.sum, stats.getSum(), TOLERANCE);
+                assertEquals(expected.min, stats.getMin(), 0);
+                assertEquals(expected.max, stats.getMax(), 0);
+                assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE);
+            }
+        );
+    }
+
+    public void testRandomLongs() throws IOException {
+        MappedFieldType ft =
+            new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        ft.setName("field");
+        final SimpleStatsAggregator expected = new SimpleStatsAggregator();
+        testCase(ft,
+            iw -> {
+                int numDocs = randomIntBetween(10, 50);
+                for (int i = 0; i < numDocs; i++) {
+                    Document doc = new Document();
+                    int numValues = randomIntBetween(1, 5);
+                    for (int j = 0; j < numValues; j++) {
+                        long value = randomIntBetween(-100, 100);
+                        doc.add(new SortedNumericDocValuesField("field", value));
+                        expected.add(value);
+                    }
+                    iw.addDocument(doc);
+                }
+            },
+            stats -> {
+                assertEquals(expected.count, stats.getCount(), 0);
+                assertEquals(expected.sum, stats.getSum(), TOLERANCE);
+                assertEquals(expected.min, stats.getMin(), 0);
+                assertEquals(expected.max, stats.getMax(), 0);
+                assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE);
+            }
+        );
+    }
+
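+    // Builds an in-memory Lucene index via buildIndex, runs a stats aggregation on "field" with a match-all query,
+    // and passes the resulting InternalStats to the verify consumer.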
+    public void testCase(MappedFieldType ft,
+                         CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
+                         Consumer<InternalStats> verify) throws IOException {
+        try (Directory directory = newDirectory();
+             RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
+            buildIndex.accept(indexWriter);
+            try (IndexReader reader = indexWriter.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                StatsAggregationBuilder aggBuilder = new StatsAggregationBuilder("my_agg").field("field");
+                InternalStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ft);
+                verify.accept(stats);
+            }
+        }
+    }
+
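+    // Naive count/min/max/sum accumulator used as the expected-value reference in the tests above.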
+    static class SimpleStatsAggregator {
+        long count = 0;
+        double min = Long.MAX_VALUE;
+        double max = Long.MIN_VALUE;
+        double sum = 0;
+
+        void add(double value) {
+            count ++;
+            if (Double.compare(value, min) < 0) {
+                min = value;
+            }
+            if (Double.compare(value, max) > 0) {
+                max = value;
+            }
+            sum += value;
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
index b2adee43a4baf..9ca7726b16445 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
@@ -421,7 +421,7 @@ public void testBreadthFirstWithAggOrderAndScoreNeeded() throws Exception {
             assertThat(hits.getHits().length, equalTo(3));
 
             assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(4));
-            id --;
+            id--;
         }
     }
 
@@ -452,7 +452,8 @@ public void testPagination() throws Exception {
                                 .executionHint(randomExecutionHint())
                                 .field(TERMS_AGGS_FIELD)
                                 .subAggregation(
-                                        topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))
+                                        topHits("hits")
+                                            .sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))
                                                 .from(from)
                                                 .size(size)
                                 )
@@ -483,7 +484,8 @@ public void testPagination() throws Exception {
         assertThat(hits.getTotalHits(), equalTo(controlHits.getTotalHits()));
         assertThat(hits.getHits().length, equalTo(controlHits.getHits().length));
         for (int i = 0; i < hits.getHits().length; i++) {
-            logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).getId(), hits.getAt(i).getSortValues()[0], controlHits.getAt(i).getId(), controlHits.getAt(i).getSortValues()[0]);
+            logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).getId(), hits.getAt(i).getSortValues()[0],
+                controlHits.getAt(i).getId(), controlHits.getAt(i).getSortValues()[0]);
             assertThat(hits.getAt(i).getId(), equalTo(controlHits.getAt(i).getId()));
             assertThat(hits.getAt(i).getSortValues()[0], equalTo(controlHits.getAt(i).getSortValues()[0]));
         }
@@ -1000,51 +1002,55 @@ public void testNoStoredFields() throws Exception {
      * not using a script does get cached.
      */
     public void testDontCacheScripts() throws Exception {
-        assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long")
+        try {
+            assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long")
                 .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
                 .get());
-        indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1),
+            indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1),
                 client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2));
 
-        // Make sure we are starting with a clear cache
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            // Make sure we are starting with a clear cache
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getHitCount(), equalTo(0L));
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getMissCount(), equalTo(0L));
 
-        // Test that a request using a script field does not get cached
-        SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0)
+            // Test that a request using a script field does not get cached
+            SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0)
                 .addAggregation(topHits("foo").scriptField("bar",
                     new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()))).get();
-        assertSearchResponse(r);
+            assertSearchResponse(r);
 
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getHitCount(), equalTo(0L));
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getMissCount(), equalTo(0L));
 
-        // Test that a request using a script sort does not get cached
-        r = client().prepareSearch("cache_test_idx").setSize(0)
+            // Test that a request using a script sort does not get cached
+            r = client().prepareSearch("cache_test_idx").setSize(0)
                 .addAggregation(topHits("foo").sort(
-                        SortBuilders.scriptSort(
-                            new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), ScriptSortType.STRING)))
+                    SortBuilders.scriptSort(
+                        new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), ScriptSortType.STRING)))
                 .get();
-        assertSearchResponse(r);
+            assertSearchResponse(r);
 
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getHitCount(), equalTo(0L));
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getMissCount(), equalTo(0L));
 
-        // To make sure that the cache is working test that a request not using
-        // a script is cached
-        r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")).get();
-        assertSearchResponse(r);
+            // To make sure that the cache is working test that a request not using
+            // a script is cached
+            r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")).get();
+            assertSearchResponse(r);
 
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getHitCount(), equalTo(0L));
-        assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
+            assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
                 .getMissCount(), equalTo(1L));
+        } finally {
+            assertAcked(client().admin().indices().prepareDelete("cache_test_idx")); // delete the index, otherwise repeated runs with tests.iters would fail
+        }
     }
 
     public void testWithRescore() {
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java
index cdf13d04444c4..9dd5715ff93de 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java
@@ -50,8 +50,7 @@ protected void assertReduced(InternalAvg reduced, List<InternalAvg> inputs) {
             counts += in.getCount();
         }
         assertEquals(counts, reduced.getCount());
-        assertEquals(sum, reduced.getSum(), Double.MIN_VALUE);
-        assertEquals(sum / counts, reduced.value(), Double.MIN_VALUE);
+        assertEquals(sum, reduced.getSum(), 0.00000001);
+        assertEquals(sum / counts, reduced.value(), 0.00000001);
     }
-
 }
diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java
index daad1a51a085b..b277de64a9d9f 100644
--- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java
+++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java
@@ -46,4 +46,17 @@ public void testBoundedByBetweenMinAndMax() {
         assertThat(ThreadPool.boundedBy(value, min, max), equalTo(value));
     }
 
+    public void testAbsoluteTime() throws Exception {
+        TestThreadPool threadPool = new TestThreadPool("test");
+        try {
+            long currentTime = System.currentTimeMillis();
+            long gotTime = threadPool.absoluteTimeInMillis();
+            long delta = Math.abs(gotTime - currentTime);
+            assertTrue("thread pool cached absolute time " + gotTime + " is too far from real current time " + currentTime,
+                delta < 10000); // the delta can be large, we just care it is the same order of magnitude
+        } finally {
+            threadPool.shutdown();
+            threadPool.close();
+        }
+    }
 }
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 3463487caea2e..e95ccf0932000 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -81,20 +81,16 @@ project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each
     }
   }
   // We would like to make sure integ tests for the distribution run after
-  // integ tests for the modules included in the distribution. However, gradle
-  // has a bug where depending on a task with a finalizer can sometimes not make
-  // the finalizer task follow the original task immediately. To work around this,
-  // we make the mustRunAfter the finalizer task itself.
-  // See https://discuss.gradle.org/t/cross-project-task-dependencies-ordering-screws-up-finalizers/13190
+  // integ tests for the modules included in the distribution.
   project.configure(distributions.findAll { it.name != 'integ-test-zip' }) { Project distribution ->
     distribution.afterEvaluate({
       // some integTest tasks will have multiple finalizers
-      distribution.integTest.mustRunAfter module.tasks.find { t -> t.name.matches(".*integTest\$") }.getFinalizedBy()
+      distribution.integTest.mustRunAfter module.tasks.find { t -> t.name.matches(".*integTest\$") }
     })
   }
   // also want to make sure the module's integration tests run after the integ-test-zip (ie rest tests)
   module.afterEvaluate({
-    module.integTest.mustRunAfter(':distribution:integ-test-zip:integTest#stop')
+    module.integTest.mustRunAfter(':distribution:integ-test-zip:integTest')
   })
   restTestExpansions['expected.modules.count'] += 1
 }
@@ -129,14 +125,13 @@ configure(distributions) {
   project.integTest {
     dependsOn project.assemble
     includePackaged project.name == 'integ-test-zip'
-    cluster {
-      distribution = project.name
-    }
     if (project.name != 'integ-test-zip') {
-      // see note above with module mustRunAfter about why integTest#stop is used here
-      mustRunAfter ':distribution:integ-test-zip:integTest#stop'
+      mustRunAfter ':distribution:integ-test-zip:integTest'
     }
   }
+  project.integTestCluster {
+    distribution = project.name
+  }
 
   processTestResources {
     inputs.properties(project(':distribution').restTestExpansions)
diff --git a/docs/build.gradle b/docs/build.gradle
index b4cf09c150fa2..36727b12e5097 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -81,8 +81,6 @@ buildRestTests.expectedUnconvertedCandidates = [
   'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc',
   'reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc',
   'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc',
-  'reference/cat/recovery.asciidoc',
-  'reference/cat/shards.asciidoc',
   'reference/cat/snapshots.asciidoc',
   'reference/cat/templates.asciidoc',
   'reference/cat/thread_pool.asciidoc',
@@ -131,27 +129,25 @@ buildRestTests.expectedUnconvertedCandidates = [
   'reference/search/request/inner-hits.asciidoc',
 ]
 
-integTest {
-  cluster {
-    setting 'script.inline', 'true'
-    setting 'script.stored', 'true'
-    setting 'script.max_compilations_per_minute', '1000'
-    /* Enable regexes in painless so our tests don't complain about example
-     * snippets that use them. */
-    setting 'script.painless.regex.enabled', 'true'
-    Closure configFile = {
-      extraConfigFile it, "src/test/cluster/config/$it"
-    }
-    configFile 'scripts/my_script.painless'
-    configFile 'scripts/my_init_script.painless'
-    configFile 'scripts/my_map_script.painless'
-    configFile 'scripts/my_combine_script.painless'
-    configFile 'scripts/my_reduce_script.painless'
-    configFile 'userdict_ja.txt'
-    configFile 'KeywordTokenizer.rbbi'
-    // Whitelist reindexing from the local node so we can test it.
-    setting 'reindex.remote.whitelist', '127.0.0.1:*'
+integTestCluster {
+  setting 'script.inline', 'true'
+  setting 'script.stored', 'true'
+  setting 'script.max_compilations_per_minute', '1000'
+  /* Enable regexes in painless so our tests don't complain about example
+   * snippets that use them. */
+  setting 'script.painless.regex.enabled', 'true'
+  Closure configFile = {
+    extraConfigFile it, "src/test/cluster/config/$it"
   }
+  configFile 'scripts/my_script.painless'
+  configFile 'scripts/my_init_script.painless'
+  configFile 'scripts/my_map_script.painless'
+  configFile 'scripts/my_combine_script.painless'
+  configFile 'scripts/my_reduce_script.painless'
+  configFile 'userdict_ja.txt'
+  configFile 'KeywordTokenizer.rbbi'
+  // Whitelist reindexing from the local node so we can test it.
+  setting 'reindex.remote.whitelist', '127.0.0.1:*'
 }
 
 // Build the cluster with all plugins
@@ -163,10 +159,8 @@ project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each {
     return
   }
   subproj.afterEvaluate { // need to wait until the project has been configured
-    integTest {
-      cluster {
-        plugin subproj.path
-      }
+    integTestCluster {
+      plugin subproj.path
     }
   }
 }
diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc
index 7cdeee684d083..cbf5fc42ce5c1 100644
--- a/docs/reference/cat/recovery.asciidoc
+++ b/docs/reference/cat/recovery.asciidoc
@@ -12,17 +12,24 @@ way for shards to be loaded from disk when a node starts up.
 As an example, here is what the recovery state of a cluster may look like when there
 are no shards in transit from one node to another:
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------------------
-> curl -XGET 'localhost:9200/_cat/recovery?v'
-index shard time type  stage source_host  source_node target_host target_node repository snapshot files files_percent bytes bytes_percent
- total_files total_bytes translog translog_percent total_translog
-index 0     87ms store done  127.0.0.1        I8hydUG      127.0.0.1        I8hydUG      n/a        n/a      0     0.0%          0     0.0%          0           0           0        100.0%           0
-index 1     97ms store done  127.0.0.1        I8hydUG      127.0.0.1        I8hydUG      n/a        n/a      0     0.0%          0     0.0%          0           0           0        100.0%           0
-index 2     93ms store done  127.0.0.1        I8hydUG      127.0.0.1        I8hydUG      n/a        n/a      0     0.0%          0     0.0%          0           0           0        100.0%           0
-index 3     90ms store done  127.0.0.1        I8hydUG      127.0.0.1        I8hydUG      n/a        n/a      0     0.0%          0     0.0%          0           0           0        100.0%           0
-index 4     9ms  store done  127.0.0.1        I8hydUG      127.0.0.1        I8hydUG      n/a        n/a      0     0.0%          0     0.0%          0           0           0        100.0%           0
+GET _cat/recovery?v
 ---------------------------------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+The response of this request will be something like:
+
+[source,js]
+---------------------------------------------------------------------------
+index   shard time type  stage source_host source_node target_host target_node repository snapshot files files_recovered files_percent files_total bytes bytes_recovered bytes_percent bytes_total translog_ops translog_ops_recovered translog_ops_percent
+twitter 0     13ms store done  n/a         n/a         127.0.0.1   node-0      n/a        n/a      0     0               100%          13          0     0               100%          9928        0            0                      100.0%
+---------------------------------------------------------------------------
+// TESTRESPONSE[s/store/empty_store/]
+// TESTRESPONSE[s/100%/0.0%/]
+// TESTRESPONSE[s/9928/0/]
+// TESTRESPONSE[s/13/\\d+/ _cat]
 
 In the above case, the source and target nodes are the same because the recovery
 type was store, i.e. they were read from local storage on node start.
@@ -31,43 +38,46 @@ Now let's see what a live recovery looks like. By increasing the replica count
 of our index and bringing another node online to host the replicas, we can see
 what a live shard recovery looks like.
 
-[source,sh]
+[source,js]
 ----------------------------------------------------------------------------
-> curl -XPUT 'localhost:9200/wiki/_settings' -d'{"number_of_replicas":1}'
-{"acknowledged":true}
-
-> curl -XGET 'localhost:9200/_cat/recovery?v&h=i,s,t,ty,st,shost,thost,f,fp,b,bp'
-i     s t      ty      st    shost  thost  f     fp      b        bp
-wiki  0 1252ms store   done  hostA  hostA  4     100.0%  23638870 100.0%
-wiki  0 1672ms replica index hostA  hostB  4     75.0%   23638870 48.8%
-wiki  1 1698ms replica index hostA  hostB  4     75.0%   23348540 49.4%
-wiki  1 4812ms store   done  hostA  hostA  33    100.0%  24501912 100.0%
-wiki  2 1689ms replica index hostA  hostB  4     75.0%   28681851 40.2%
-wiki  2 5317ms store   done  hostA  hostA  36    100.0%  30267222 100.0%
+GET _cat/recovery?v&h=i,s,t,ty,st,shost,thost,f,fp,b,bp
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+This will return a line like:
+
+[source,js]
+---------------------------------------------------------------------------
+i       s t      ty   st    shost       thost       f     fp      b bp
+twitter 0 1252ms peer done  192.168.1.1 192.168.1.2 0     100.0%  0 100.0%
 ----------------------------------------------------------------------------
+// TESTRESPONSE[s/peer/empty_store/]
+// TESTRESPONSE[s/192.168.1.2/127.0.0.1/]
+// TESTRESPONSE[s/192.168.1.1/n\/a/]
+// TESTRESPONSE[s/100.0%/0.0%/]
+// TESTRESPONSE[s/1252/\\d+/ _cat]
 
-We can see in the above listing that our 3 initial shards are in various stages
-of being replicated from one node to another. Notice that the recovery type is
-shown as `replica`. The files and bytes copied are real-time measurements.
+We can see in the above listing that our twitter shard was recovered from another node.
+Notice that the recovery type is shown as `peer`. The files and bytes copied are
+real-time measurements.
 
 Finally, let's see what a snapshot recovery looks like. Assuming I have previously
 made a backup of my index, I can restore it using the <>
 API.
 
-[source,sh]
+[source,js]
 --------------------------------------------------------------------------------
-> curl -XPOST 'localhost:9200/_snapshot/imdb/snapshot_2/_restore'
-{"acknowledged":true}
-> curl -XGET 'localhost:9200/_cat/recovery?v&h=i,s,t,ty,st,rep,snap,f,fp,b,bp'
-i     s t      ty       st    rep        snap     f     fp      b     bp
-imdb  0 1978ms snapshot done  imdb       snap_1   79    8.0%    12086 9.0%
-imdb  1 2790ms snapshot index imdb       snap_1   88    7.7%    11025 8.1%
-imdb  2 2790ms snapshot index imdb       snap_1   85    0.0%    12072 0.0%
-imdb  3 2796ms snapshot index imdb       snap_1   85    2.4%    12048 7.2%
-imdb  4  819ms snapshot init  imdb       snap_1   0     0.0%    0     0.0%
---------------------------------------------------------------------------------
-
-
-
+GET _cat/recovery?v&h=i,s,t,ty,st,rep,snap,f,fp,b,bp
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[skip:no need to execute snapshot/restore here]
 
+This will show a recovery of type snapshot in the response:
 
+[source,js]
+---------------------------------------------------------------------------
+i       s t      ty       st    rep     snap   f  fp   b     bp
+twitter 0 1978ms snapshot done  twitter snap_1 79 8.0% 12086 9.0%
+--------------------------------------------------------------------------------
+// TESTRESPONSE[_cat]
diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc
index 0a915190dbcad..562117c3a6fe3 100644
--- a/docs/reference/cat/shards.asciidoc
+++ b/docs/reference/cat/shards.asciidoc
@@ -5,15 +5,26 @@ The `shards` command is the detailed view of what nodes contain which
 shards.  It will tell you if it's a primary or replica, the number of
 docs, the bytes it takes on disk, and the node where it's located.
 
-Here we see a single index, with three primary shards and no replicas:
-
-[source,sh]
---------------------------------------------------
-% curl 192.168.56.20:9200/_cat/shards
-wiki1 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
-wiki1 1 p STARTED 3013 29.6mb 192.168.56.30 bGG90GE
-wiki1 2 p STARTED 3973 38.1mb 192.168.56.20 I8hydUG
---------------------------------------------------
+Here we see a single index, with one primary shard and no replicas:
+
+[source,js]
+---------------------------------------------------------------------------
+GET _cat/shards
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+This will return:
+
+[source,js]
+---------------------------------------------------------------------------
+twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
+---------------------------------------------------------------------------
+// TESTRESPONSE[s/3014/\\d+/]
+// TESTRESPONSE[s/31.1/\\d+\.\\d+/]
+// TESTRESPONSE[s/mb/.*/]
+// TESTRESPONSE[s/192.168.56.10/.*/]
+// TESTRESPONSE[s/H5dfFeA/node-0/ _cat]
 
 [float]
 [[index-pattern]]
@@ -23,30 +34,47 @@ If you have many shards, you may wish to limit which indices show up
 in the output.  You can always do this with `grep`, but you can save
 some bandwidth by supplying an index pattern to the end.
 
-[source,sh]
---------------------------------------------------
-% curl 192.168.56.20:9200/_cat/shards/wiki*
-wiki2 0 p STARTED 197 3.2mb 192.168.56.10 H5dfFeA
-wiki2 1 p STARTED 205 5.9mb 192.168.56.30 bGG90GE
-wiki2 2 p STARTED 275 7.8mb 192.168.56.20 I8hydUG
---------------------------------------------------
+[source,js]
+---------------------------------------------------------------------------
+GET _cat/shards/twitt*
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+Which will return the following:
+
+[source,js]
+---------------------------------------------------------------------------
+twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
+---------------------------------------------------------------------------
+// TESTRESPONSE[s/3014/\\d+/]
+// TESTRESPONSE[s/31.1/\\d+\.\\d+/]
+// TESTRESPONSE[s/mb/.*/]
+// TESTRESPONSE[s/192.168.56.10/.*/]
+// TESTRESPONSE[s/H5dfFeA/node-0/ _cat]
 
 
 [float]
 [[relocation]]
 === Relocation
 
-Let's say you've checked your health and you see two relocating
+Let's say you've checked your health and you see relocating
 shards.  Where are they from and where are they going?
 
-[source,sh]
---------------------------------------------------
-% curl 192.168.56.10:9200/_cat/health
-1384315316 20:01:56 foo green 3 3 12 6 2 0 0
-% curl 192.168.56.10:9200/_cat/shards | fgrep RELO
-wiki1 0 r RELOCATING 3014 31.1mb 192.168.56.20 I8hydUG -> 192.168.56.30 bGG90GE
-wiki1 1 r RELOCATING 3013 29.6mb 192.168.56.10 H5dfFeA -> 192.168.56.30 bGG90GE
---------------------------------------------------
+[source,js]
+---------------------------------------------------------------------------
+GET _cat/shards
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[skip:for now, relocation cannot be recreated]
+
+A relocating shard will be shown as follows:
+
+[source,js]
+---------------------------------------------------------------------------
+twitter 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> 192.168.56.30 bGG90GE
+---------------------------------------------------------------------------
+// TESTRESPONSE[_cat]
 
 [float]
 [[states]]
@@ -55,42 +83,45 @@ wiki1 1 r RELOCATING 3013 29.6mb 192.168.56.10 H5dfFeA -> 192.168.56.30 bGG90GE
 Before a shard can be used, it goes through an `INITIALIZING` state.
 `shards` can show you which ones.
 
-[source,sh]
---------------------------------------------------
-% curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":1}'
-{"acknowledged":true}
-% curl 192.168.56.20:9200/_cat/shards
-wiki1 0 p STARTED      3014 31.1mb 192.168.56.10 H5dfFeA
-wiki1 0 r INITIALIZING    0 14.3mb 192.168.56.30 bGG90GE
-wiki1 1 p STARTED      3013 29.6mb 192.168.56.30 bGG90GE
-wiki1 1 r INITIALIZING    0 13.1mb 192.168.56.20 I8hydUG
-wiki1 2 r INITIALIZING    0   14mb 192.168.56.10 H5dfFeA
-wiki1 2 p STARTED      3973 38.1mb 192.168.56.20 I8hydUG
---------------------------------------------------
+[source,js]
+---------------------------------------------------------------------------
+GET _cat/shards
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[skip:there is no guarantee to test for shards in initializing state]
+
+You can see the initializing state in the response like this:
+
+[source,js]
+---------------------------------------------------------------------------
+twitter 0 p STARTED      3014 31.1mb 192.168.56.10 H5dfFeA
+twitter 0 r INITIALIZING    0 14.3mb 192.168.56.30 bGG90GE
+---------------------------------------------------------------------------
+// TESTRESPONSE[_cat]
 
 If a shard cannot be assigned, for example you've overallocated the
 number of replicas for the number of nodes in the cluster, the shard
 will remain `UNASSIGNED` with the <> `ALLOCATION_FAILED`.
 
-[source,sh]
---------------------------------------------------
-% curl -XPUT 192.168.56.20:9200/_settings -d'{"number_of_replicas":3}'
-% curl 192.168.56.20:9200/_cat/health
-1384316325 20:18:45 foo yellow 3 3 9 3 0 0 3
-% curl 192.168.56.20:9200/_cat/shards
-wiki1 0 p STARTED    3014 31.1mb 192.168.56.10 H5dfFeA
-wiki1 0 r STARTED    3014 31.1mb 192.168.56.30 bGG90GE
-wiki1 0 r STARTED    3014 31.1mb 192.168.56.20 I8hydUG
-wiki1 0 r UNASSIGNED ALLOCATION_FAILED
-wiki1 1 r STARTED    3013 29.6mb 192.168.56.10 H5dfFeA
-wiki1 1 p STARTED    3013 29.6mb 192.168.56.30 bGG90GE
-wiki1 1 r STARTED    3013 29.6mb 192.168.56.20 I8hydUG
-wiki1 1 r UNASSIGNED ALLOCATION_FAILED
-wiki1 2 r STARTED    3973 38.1mb 192.168.56.10 H5dfFeA
-wiki1 2 r STARTED    3973 38.1mb 192.168.56.30 bGG90GE
-wiki1 2 p STARTED    3973 38.1mb 192.168.56.20 I8hydUG
-wiki1 2 r UNASSIGNED ALLOCATION_FAILED
---------------------------------------------------
+You can use the shards API to find out the reason.
+
+[source,js]
+---------------------------------------------------------------------------
+GET _cat/shards?h=index,shard,prirep,state,unassigned.reason
+---------------------------------------------------------------------------
+// CONSOLE
+// TEST[skip:for now]
+
+The reason for an unassigned shard will be listed as the last field:
+
+[source,js]
+---------------------------------------------------------------------------
+twitter 0 p STARTED    3014 31.1mb 192.168.56.10 H5dfFeA
+twitter 0 r STARTED    3014 31.1mb 192.168.56.30 bGG90GE
+twitter 0 r STARTED    3014 31.1mb 192.168.56.20 I8hydUG
+twitter 0 r UNASSIGNED ALLOCATION_FAILED
+---------------------------------------------------------------------------
+// TESTRESPONSE[_cat]
 
 [float]
 [[reason-unassigned]]
diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc
index dc73b4408e621..f6fb84ea9297c 100644
--- a/docs/reference/cluster/health.asciidoc
+++ b/docs/reference/cluster/health.asciidoc
@@ -34,7 +34,7 @@ Returns this:
   "active_shards_percent_as_number": 50.0
 }
 --------------------------------------------------
-// TESTRESPONSE[s/testcluster/docs_integTest/]
+// TESTRESPONSE[s/testcluster/docs_integTestCluster/]
 // TESTRESPONSE[s/"number_of_pending_tasks" : 0,/"number_of_pending_tasks" : $body.number_of_pending_tasks,/]
 // TESTRESPONSE[s/"task_max_waiting_in_queue_millis": 0/"task_max_waiting_in_queue_millis": $body.task_max_waiting_in_queue_millis/]
 
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc
index afb084f3bad1b..f52964f346a67 100755
--- a/docs/reference/getting-started.asciidoc
+++ b/docs/reference/getting-started.asciidoc
@@ -217,7 +217,7 @@ epoch      timestamp cluster       status node.total node.data shards pri relo i
 1475247709 17:01:49  elasticsearch green           1         1      0   0    0    0        0             0                  -                100.0%
 --------------------------------------------------
 // TESTRESPONSE[s/0             0/0             [01]/]
-// TESTRESPONSE[s/1475247709 17:01:49  elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTest/ _cat]
+// TESTRESPONSE[s/1475247709 17:01:49  elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/ _cat]
 
 We can see that our cluster named "elasticsearch" is up with a green status.
 
@@ -401,7 +401,7 @@ If we study the above commands carefully, we can actually see a pattern of how w
 --------------------------------------------------
 // NOTCONSOLE
 
-This REST access pattern is pervasive throughout all the API commands that if you can simply remember it, you will have a good head start at mastering Elasticsearch.
+This REST access pattern is so pervasive throughout all the API commands that if you can simply remember it, you will have a good head start at mastering Elasticsearch.
 
 == Modifying Your Data
 
diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java
index d97fb64a16d70..0914ea2910c2b 100644
--- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java
+++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
@@ -35,7 +34,7 @@
 /**
  * Computes distribution statistics over multiple fields
  */
-public class InternalMatrixStats extends InternalMetricsAggregation implements MatrixStats {
+public class InternalMatrixStats extends InternalAggregation implements MatrixStats {
     /** per shard stats needed to compute stats */
     private final RunningStats stats;
     /** final result */
diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle
index ee8c330193461..23286edd28aea 100644
--- a/modules/lang-expression/build.gradle
+++ b/modules/lang-expression/build.gradle
@@ -35,8 +35,6 @@ dependencyLicenses {
   mapping from: /asm-.*/, to: 'asm'
 }
 
-integTest {
-  cluster {
-    setting 'script.max_compilations_per_minute', '1000'
-  }
+integTestCluster {
+  setting 'script.max_compilations_per_minute', '1000'
 }
diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle
index 8fed78aca3249..8b695fbf574bc 100644
--- a/modules/lang-mustache/build.gradle
+++ b/modules/lang-mustache/build.gradle
@@ -27,11 +27,9 @@ dependencies {
   compile "com.github.spullara.mustache.java:compiler:0.9.3"
 }
 
-integTest {
-  cluster {
-    setting 'script.inline', 'true'
-    setting 'script.stored', 'true'
-    setting 'script.max_compilations_per_minute', '1000'
-    setting 'path.scripts', "${project.buildDir}/resources/test/templates"
-  }
+integTestCluster {
+  setting 'script.inline', 'true'
+  setting 'script.stored', 'true'
+  setting 'script.max_compilations_per_minute', '1000'
+  setting 'path.scripts', "${project.buildDir}/resources/test/templates"
 }
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java
index 43232cc8f6593..799d378e05fc7 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java
@@ -51,8 +51,6 @@
 
 public class CustomMustacheFactory extends DefaultMustacheFactory {
 
-    static final String CONTENT_TYPE_PARAM = "content_type";
-
     static final String JSON_MIME_TYPE_WITH_CHARSET = "application/json; charset=UTF-8";
     static final String JSON_MIME_TYPE = "application/json";
     static final String PLAIN_TEXT_MIME_TYPE = "text/plain";
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java
index c7964a716f84c..ce9a894a7bf1a 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.script.CompiledScript;
 import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.GeneralScriptException;
+import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptEngineService;
 import org.elasticsearch.script.SearchScript;
 import org.elasticsearch.search.lookup.SearchLookup;
@@ -43,8 +44,6 @@
 import java.util.Collections;
 import java.util.Map;
 
-import static org.elasticsearch.script.mustache.CustomMustacheFactory.CONTENT_TYPE_PARAM;
-
 /**
  * Main entry point handling template registration, compilation and
  * execution.
@@ -94,10 +93,10 @@ public Object compile(String templateName, String templateSource, Map params) {
-        if (params == null || params.isEmpty() || params.containsKey(CONTENT_TYPE_PARAM) == false) {
+        if (params == null || params.isEmpty() || params.containsKey(Script.CONTENT_TYPE_OPTION) == false) {
             return new CustomMustacheFactory();
         }
-        return new CustomMustacheFactory(params.get(CONTENT_TYPE_PARAM));
+        return new CustomMustacheFactory(params.get(Script.CONTENT_TYPE_OPTION));
     }
 
     @Override
@@ -142,7 +141,7 @@ private class MustacheExecutableScript implements ExecutableScript {
          **/
         MustacheExecutableScript(CompiledScript template, Map<String, Object> vars) {
             this.template = template;
-            this.vars = vars == null ? Collections.emptyMap() : vars;
+            this.vars = vars == null ? Collections.emptyMap() : vars;
         }
 
         @Override
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java
index a1abe5ad9a690..7d386833a6fec 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java
@@ -66,8 +66,9 @@ public class RestSearchTemplateAction extends BaseRestHandler {
         PARSER.declareField((parser, request, value) -> {
             request.setScriptType(ScriptType.INLINE);
             if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
-                try (XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType())) {
-                    request.setScript(builder.copyCurrentStructure(parser).bytes().utf8ToString());
+                //convert the template to json which is the only supported XContentType (see CustomMustacheFactory#createEncoder)
+                try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
+                    request.setScript(builder.copyCurrentStructure(parser).string());
                 } catch (IOException e) {
                     throw new ParsingException(parser.getTokenLocation(), "Could not parse inline template", e);
                 }
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java
index 6158e80c241d8..22d7da774eb7c 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
@@ -81,7 +82,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             response.toXContent(builder, params);
         } else {
             builder.startObject();
-            builder.rawField("template_output", source);
+            //we can assume the template is always json as we convert it before compiling it
+            builder.rawField("template_output", source, XContentType.JSON);
             builder.endObject();
         }
         return builder;
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
index 7d83fcaf5813b..d7b0406238278 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.Script;
@@ -82,8 +83,8 @@ protected void doExecute(SearchTemplateRequest request, ActionListener params = randomBoolean() ? singletonMap(CONTENT_TYPE_PARAM, JSON_MIME_TYPE) : emptyMap();
+        final Map params = randomBoolean() ? singletonMap(Script.CONTENT_TYPE_OPTION, JSON_MIME_TYPE) : emptyMap();
 
         Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params);
         CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script);
@@ -75,7 +75,7 @@ public void testJsonEscapeEncoder() {
 
     public void testDefaultEncoder() {
         final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY);
-        final Map<String, String> params = singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_MIME_TYPE);
+        final Map<String, String> params = singletonMap(Script.CONTENT_TYPE_OPTION, PLAIN_TEXT_MIME_TYPE);
 
         Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params);
         CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script);
@@ -87,7 +87,7 @@ public void testDefaultEncoder() {
 
     public void testUrlEncoder() {
         final ScriptEngineService engine = new MustacheScriptEngineService(Settings.EMPTY);
-        final Map<String, String> params = singletonMap(CONTENT_TYPE_PARAM, X_WWW_FORM_URLENCODED_MIME_TYPE);
+        final Map<String, String> params = singletonMap(Script.CONTENT_TYPE_OPTION, X_WWW_FORM_URLENCODED_MIME_TYPE);
 
         Mustache script = (Mustache) engine.compile(null, "{\"field\": \"{{value}}\"}", params);
         CompiledScript compiled = new CompiledScript(INLINE, null, MustacheScriptEngineService.NAME, script);
diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle
index dc56039afeedd..c9e013d8c6d7f 100644
--- a/modules/lang-painless/build.gradle
+++ b/modules/lang-painless/build.gradle
@@ -47,10 +47,8 @@ dependencies {
 ant.references['regenerate.classpath'] = new Path(ant.project, configurations.regenerate.asPath)
 ant.importBuild 'ant.xml'
 
-integTest {
-  cluster {
-    setting 'script.max_compilations_per_minute', '1000'
-  }
+integTestCluster {
+  setting 'script.max_compilations_per_minute', '1000'
 }
 
 /* Build Javadoc for the Java classes in Painless's public API that are in the
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java
index 95f154469db88..dbefd548cd232 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java
@@ -23,6 +23,29 @@
 import org.elasticsearch.painless.Definition.Sort;
 import org.elasticsearch.painless.Definition.Type;
 
+import java.util.Objects;
+
+import static org.elasticsearch.painless.Definition.BOOLEAN_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.BOOLEAN_TYPE;
+import static org.elasticsearch.painless.Definition.BYTE_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.BYTE_TYPE;
+import static org.elasticsearch.painless.Definition.CHAR_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.CHAR_TYPE;
+import static org.elasticsearch.painless.Definition.DEF_TYPE;
+import static org.elasticsearch.painless.Definition.DOUBLE_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.DOUBLE_TYPE;
+import static org.elasticsearch.painless.Definition.FLOAT_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.FLOAT_TYPE;
+import static org.elasticsearch.painless.Definition.INT_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.INT_TYPE;
+import static org.elasticsearch.painless.Definition.LONG_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.LONG_TYPE;
+import static org.elasticsearch.painless.Definition.NUMBER_TYPE;
+import static org.elasticsearch.painless.Definition.OBJECT_TYPE;
+import static org.elasticsearch.painless.Definition.SHORT_OBJ_TYPE;
+import static org.elasticsearch.painless.Definition.SHORT_TYPE;
+import static org.elasticsearch.painless.Definition.STRING_TYPE;
+
 /**
  * Used during the analysis phase to collect legal type casts and promotions
  * for type-checking and later to write necessary casts in the bytecode.
@@ -30,9 +53,9 @@
 public final class AnalyzerCaster {
 
     public static Cast getLegalCast(Location location, Type actual, Type expected, boolean explicit, boolean internal) {
-        if (actual == null || expected == null) {
-            throw new IllegalStateException("Neither actual [" + actual + "] nor expected [" + expected + "] can be null");
-        }
+        Objects.requireNonNull(actual);
+        Objects.requireNonNull(expected);
+
         if (actual.equals(expected)) {
             return null;
         }
@@ -41,15 +64,15 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
             case BOOL:
                 switch (expected.sort) {
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(BOOLEAN_OBJ_TYPE, DEF_TYPE, explicit, null, null, BOOLEAN_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(BOOLEAN_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, BOOLEAN_TYPE, null);
 
                         break;
                     case BOOL_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(BOOLEAN_TYPE, BOOLEAN_TYPE, explicit, null, null, null, BOOLEAN_TYPE);
                 }
 
                 break;
@@ -60,53 +83,57 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case LONG:
                     case FLOAT:
                     case DOUBLE:
-                        return new Cast(actual, expected, explicit);
+                        return new Cast(BYTE_TYPE, expected, explicit);
                     case CHAR:
                         if (explicit)
-                            return new Cast(actual, expected, true);
+                            return new Cast(BYTE_TYPE, CHAR_TYPE, true);
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(BYTE_OBJ_TYPE, DEF_TYPE, explicit, null, null, BYTE_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(BYTE_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, BYTE_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(BYTE_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, BYTE_TYPE, null);
+
+                        break;
                     case BYTE_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, BYTE_TYPE, explicit, null, null, null, BYTE_TYPE);
 
                         break;
                     case SHORT_OBJ:
                         if (internal)
-                            return new Cast(actual,Definition.SHORT_TYPE, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, SHORT_TYPE, explicit, null, null, null, SHORT_TYPE);
 
                         break;
                     case INT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.INT_TYPE, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, INT_TYPE, explicit, null, null, null, INT_TYPE);
 
                         break;
                     case LONG_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.LONG_TYPE, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, LONG_TYPE, explicit, null, null, null, LONG_TYPE);
 
                         break;
                     case FLOAT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.FLOAT_TYPE, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, FLOAT_TYPE, explicit, null, null, null, FLOAT_TYPE);
 
                         break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.DOUBLE_TYPE, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case CHAR_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.CHAR_TYPE, explicit, false, false, false, true);
+                            return new Cast(BYTE_TYPE, CHAR_TYPE, true, null, null, null, CHAR_TYPE);
 
                         break;
                 }
@@ -118,54 +145,58 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case LONG:
                     case FLOAT:
                     case DOUBLE:
-                        return new Cast(actual, expected, explicit);
+                        return new Cast(SHORT_TYPE, expected, explicit);
                     case BYTE:
                     case CHAR:
                         if (explicit)
-                            return new Cast(actual, expected, true);
+                            return new Cast(SHORT_TYPE, expected, true);
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(SHORT_OBJ_TYPE, DEF_TYPE, explicit, null, null, SHORT_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(SHORT_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, SHORT_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(SHORT_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, SHORT_TYPE, null);
+
+                        break;
                     case SHORT_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(SHORT_TYPE, SHORT_TYPE, explicit, null, null, null, SHORT_TYPE);
 
                         break;
                     case INT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.INT_TYPE, explicit, false, false, false, true);
+                            return new Cast(SHORT_TYPE, INT_TYPE, explicit, null, null, null, INT_TYPE);
 
                         break;
                     case LONG_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.LONG_TYPE, explicit, false, false, false, true);
+                            return new Cast(SHORT_TYPE, LONG_TYPE, explicit, null, null, null, LONG_TYPE);
 
                         break;
                     case FLOAT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.FLOAT_TYPE, explicit, false, false, false, true);
+                            return new Cast(SHORT_TYPE, FLOAT_TYPE, explicit, null, null, null, FLOAT_TYPE);
 
                         break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.DOUBLE_TYPE, explicit, false, false, false, true);
+                            return new Cast(SHORT_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case BYTE_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.BYTE_TYPE, true, false, false, false, true);
+                            return new Cast(SHORT_TYPE, BYTE_TYPE, true, null, null, null, BYTE_TYPE);
 
                         break;
                     case CHAR_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.CHAR_TYPE, true, false, false, false, true);
+                            return new Cast(SHORT_TYPE, CHAR_TYPE, true, null, null, null, CHAR_TYPE);
 
                         break;
                 }
@@ -177,7 +208,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case LONG:
                     case FLOAT:
                     case DOUBLE:
-                        return new Cast(actual, expected, explicit);
+                        return new Cast(CHAR_TYPE, expected, explicit);
                     case BYTE:
                     case SHORT:
                         if (explicit)
@@ -185,48 +216,52 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(CHAR_OBJ_TYPE, DEF_TYPE, explicit, null, null, CHAR_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(CHAR_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, CHAR_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(CHAR_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, CHAR_TYPE, null);
+
+                        break;
                     case CHAR_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(CHAR_TYPE, CHAR_TYPE, explicit, null, null, null, CHAR_TYPE);
 
                         break;
                     case STRING:
-                        return new Cast(actual, Definition.STRING_TYPE, explicit, false, false, false, false);
+                        return new Cast(CHAR_TYPE, STRING_TYPE, explicit);
                     case INT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.INT_TYPE, explicit, false, false, false, true);
+                            return new Cast(CHAR_TYPE, INT_TYPE, explicit, null, null, null, INT_TYPE);
 
                         break;
                     case LONG_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.LONG_TYPE, explicit, false, false, false, true);
+                            return new Cast(CHAR_TYPE, LONG_TYPE, explicit, null, null, null, LONG_TYPE);
 
                         break;
                     case FLOAT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.FLOAT_TYPE, explicit, false, false, false, true);
+                            return new Cast(CHAR_TYPE, FLOAT_TYPE, explicit, null, null, null, FLOAT_TYPE);
 
                         break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.DOUBLE_TYPE, explicit, false, false, false, true);
+                            return new Cast(CHAR_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case BYTE_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.BYTE_TYPE, true, false, false, false, true);
+                            return new Cast(CHAR_TYPE, BYTE_TYPE, true, null, null, null, BYTE_TYPE);
 
                         break;
                     case SHORT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.SHORT_TYPE, true, false, false, false, true);
+                            return new Cast(CHAR_TYPE, SHORT_TYPE, true, null, null, null, SHORT_TYPE);
 
                         break;
                 }
@@ -237,55 +272,59 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case LONG:
                     case FLOAT:
                     case DOUBLE:
-                        return new Cast(actual, expected, explicit);
+                        return new Cast(INT_TYPE, expected, explicit);
                     case BYTE:
                     case SHORT:
                     case CHAR:
                         if (explicit)
-                            return new Cast(actual, expected, true);
+                            return new Cast(INT_TYPE, expected, true);
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(INT_OBJ_TYPE, DEF_TYPE, explicit, null, null, INT_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(INT_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, INT_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(INT_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, INT_TYPE, null);
+
+                        break;
                     case INT_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(INT_TYPE, INT_TYPE, explicit, null, null, null, INT_TYPE);
 
                         break;
                     case LONG_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.LONG_TYPE, explicit, false, false, false, true);
+                            return new Cast(INT_TYPE, LONG_TYPE, explicit, null, null, null, LONG_TYPE);
 
                         break;
                     case FLOAT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.FLOAT_TYPE, explicit, false, false, false, true);
+                            return new Cast(INT_TYPE, FLOAT_TYPE, explicit, null, null, null, FLOAT_TYPE);
 
                         break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.DOUBLE_TYPE, explicit, false, false, false, true);
+                            return new Cast(INT_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case BYTE_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.BYTE_TYPE, true, false, false, false, true);
+                            return new Cast(INT_TYPE, BYTE_TYPE, true, null, null, null, BYTE_TYPE);
 
                         break;
                     case SHORT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.SHORT_TYPE, true, false, false, false, true);
+                            return new Cast(INT_TYPE, SHORT_TYPE, true, null, null, null, SHORT_TYPE);
 
                         break;
                     case CHAR_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.CHAR_TYPE, true, false, false, false, true);
+                            return new Cast(INT_TYPE, CHAR_TYPE, true, null, null, null, CHAR_TYPE);
 
                         break;
                 }
@@ -295,7 +334,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                 switch (expected.sort) {
                     case FLOAT:
                     case DOUBLE:
-                        return new Cast(actual, expected, explicit);
+                        return new Cast(LONG_TYPE, expected, explicit);
                     case BYTE:
                     case SHORT:
                     case CHAR:
@@ -305,46 +344,50 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(LONG_OBJ_TYPE, DEF_TYPE, explicit, null, null, LONG_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(LONG_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, LONG_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(LONG_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, LONG_TYPE, null);
+
+                        break;
                     case LONG_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(LONG_TYPE, LONG_TYPE, explicit, null, null, null, LONG_TYPE);
 
                         break;
                     case FLOAT_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.FLOAT_TYPE, explicit, false, false, false, true);
+                            return new Cast(LONG_TYPE, FLOAT_TYPE, explicit, null, null, null, FLOAT_TYPE);
 
                         break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.DOUBLE_TYPE, explicit, false, false, false, true);
+                            return new Cast(LONG_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case BYTE_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.BYTE_TYPE, true, false, false, false, true);
+                            return new Cast(LONG_TYPE, BYTE_TYPE, true, null, null, null, BYTE_TYPE);
 
                         break;
                     case SHORT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.SHORT_TYPE, true, false, false, false, true);
+                            return new Cast(LONG_TYPE, SHORT_TYPE, true, null, null, null, SHORT_TYPE);
 
                         break;
                     case CHAR_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.CHAR_TYPE, true, false, false, false, true);
+                            return new Cast(LONG_TYPE, CHAR_TYPE, true, null, null, null, CHAR_TYPE);
 
                         break;
                     case INT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.INT_TYPE, true, false, false, false, true);
+                            return new Cast(LONG_TYPE, INT_TYPE, true, null, null, null, INT_TYPE);
 
                         break;
                 }
@@ -364,46 +407,50 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(FLOAT_OBJ_TYPE, DEF_TYPE, explicit, null, null, FLOAT_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(FLOAT_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, FLOAT_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(FLOAT_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, FLOAT_TYPE, null);
+
+                        break;
                     case FLOAT_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, FLOAT_TYPE, explicit, null, null, null, FLOAT_TYPE);
 
                         break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, Definition.DOUBLE_TYPE, explicit, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case BYTE_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.BYTE_TYPE, true, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, BYTE_TYPE, true, null, null, null, BYTE_TYPE);
 
                         break;
                     case SHORT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.SHORT_TYPE, true, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, SHORT_TYPE, true, null, null, null, SHORT_TYPE);
 
                         break;
                     case CHAR_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.CHAR_TYPE, true, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, CHAR_TYPE, true, null, null, null, CHAR_TYPE);
 
                         break;
                     case INT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.INT_TYPE, true, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, INT_TYPE, true, null, null, null, INT_TYPE);
 
                         break;
                     case LONG_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.LONG_TYPE, true, false, false, false, true);
+                            return new Cast(FLOAT_TYPE, LONG_TYPE, true, null, null, null, LONG_TYPE);
 
                         break;
                 }
@@ -417,91 +464,95 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case INT:
                     case FLOAT:
                         if (explicit)
-                            return new Cast(actual, expected, true);
+                            return new Cast(DOUBLE_TYPE, expected, true);
 
                         break;
                     case DEF:
-                        return new Cast(actual, Definition.DEF_TYPE, explicit, false, false, true, false);
+                        return new Cast(DOUBLE_OBJ_TYPE, DEF_TYPE, explicit, null, null, DOUBLE_TYPE, null);
                     case OBJECT:
-                        if (Definition.OBJECT_TYPE.equals(expected) && internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                        if (OBJECT_TYPE.equals(expected) && internal)
+                            return new Cast(DOUBLE_OBJ_TYPE, OBJECT_TYPE, explicit, null, null, DOUBLE_TYPE, null);
 
                         break;
                     case NUMBER:
+                        if (internal)
+                            return new Cast(DOUBLE_OBJ_TYPE, NUMBER_TYPE, explicit, null, null, DOUBLE_TYPE, null);
+
+                        break;
                     case DOUBLE_OBJ:
                         if (internal)
-                            return new Cast(actual, actual, explicit, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, DOUBLE_TYPE, explicit, null, null, null, DOUBLE_TYPE);
 
                         break;
                     case BYTE_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.BYTE_TYPE, true, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, BYTE_TYPE, true, null, null, null, BYTE_TYPE);
 
                         break;
                     case SHORT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.SHORT_TYPE, true, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, SHORT_TYPE, true, null, null, null, SHORT_TYPE);
 
                         break;
                     case CHAR_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.CHAR_TYPE, true, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, CHAR_TYPE, true, null, null, null, CHAR_TYPE);
 
                         break;
                     case INT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.INT_TYPE, true, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, INT_TYPE, true, null, null, null, INT_TYPE);
 
                         break;
                     case LONG_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.LONG_TYPE, true, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, LONG_TYPE, true, null, null, null, LONG_TYPE);
 
                         break;
                     case FLOAT_OBJ:
                         if (explicit && internal)
-                            return new Cast(actual, Definition.FLOAT_TYPE, true, false, false, false, true);
+                            return new Cast(DOUBLE_TYPE, FLOAT_TYPE, true, null, null, null, FLOAT_TYPE);
 
                         break;
                 }
 
                 break;
             case OBJECT:
-                if (Definition.OBJECT_TYPE.equals(actual))
+                if (OBJECT_TYPE.equals(actual))
                     switch (expected.sort) {
                         case BYTE:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.BYTE_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, BYTE_OBJ_TYPE, true, null, BYTE_TYPE, null, null);
 
                             break;
                         case SHORT:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.SHORT_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, SHORT_OBJ_TYPE, true, null, SHORT_TYPE, null, null);
 
                             break;
                         case CHAR:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.CHAR_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, CHAR_OBJ_TYPE, true, null, CHAR_TYPE, null, null);
 
                             break;
                         case INT:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.INT_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, INT_OBJ_TYPE, true, null, INT_TYPE, null, null);
 
                             break;
                         case LONG:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.LONG_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, LONG_OBJ_TYPE, true, null, LONG_TYPE, null, null);
 
                             break;
                         case FLOAT:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.FLOAT_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, FLOAT_OBJ_TYPE, true, null, FLOAT_TYPE, null, null);
 
                             break;
                         case DOUBLE:
                             if (internal && explicit)
-                                return new Cast(actual, Definition.DOUBLE_OBJ_TYPE, true, false, true, false, false);
+                                return new Cast(OBJECT_TYPE, DOUBLE_OBJ_TYPE, true, null, DOUBLE_TYPE, null, null);
 
                             break;
                     }
@@ -510,37 +561,37 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                 switch (expected.sort) {
                     case BYTE:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.BYTE_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, BYTE_OBJ_TYPE, true, null, BYTE_TYPE, null, null);
 
                         break;
                     case SHORT:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.SHORT_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, SHORT_OBJ_TYPE, true, null, SHORT_TYPE, null, null);
 
                         break;
                     case CHAR:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.CHAR_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, CHAR_OBJ_TYPE, true, null, CHAR_TYPE, null, null);
 
                         break;
                     case INT:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.INT_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, INT_OBJ_TYPE, true, null, INT_TYPE, null, null);
 
                         break;
                     case LONG:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.LONG_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, LONG_OBJ_TYPE, true, null, LONG_TYPE, null, null);
 
                         break;
                     case FLOAT:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.FLOAT_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, FLOAT_OBJ_TYPE, true, null, FLOAT_TYPE, null, null);
 
                         break;
                     case DOUBLE:
                         if (internal && explicit)
-                            return new Cast(actual, Definition.DOUBLE_OBJ_TYPE, true, false, true, false, false);
+                            return new Cast(NUMBER_TYPE, DOUBLE_OBJ_TYPE, true, null, DOUBLE_TYPE, null, null);
 
                         break;
                 }
@@ -550,7 +601,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                 switch (expected.sort) {
                     case BOOL:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(BOOLEAN_TYPE, BOOLEAN_TYPE, explicit, BOOLEAN_TYPE, null, null, null);
 
                         break;
                 }
@@ -565,12 +616,12 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(BYTE_TYPE, expected, explicit, BYTE_TYPE, null, null, null);
 
                         break;
                     case CHAR:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(BYTE_TYPE, expected, true, BYTE_TYPE, null, null, null);
 
                         break;
                 }
@@ -584,13 +635,13 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(SHORT_TYPE, expected, explicit, SHORT_TYPE, null, null, null);
 
                         break;
                     case BYTE:
                     case CHAR:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(SHORT_TYPE, expected, true, SHORT_TYPE, null, null, null);
 
                         break;
                 }
@@ -604,13 +655,13 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(CHAR_TYPE, expected, explicit, CHAR_TYPE, null, null, null);
 
                         break;
                     case BYTE:
                     case SHORT:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(CHAR_TYPE, expected, true, CHAR_TYPE, null, null, null);
 
                         break;
                 }
@@ -623,14 +674,14 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(INT_TYPE, expected, explicit, INT_TYPE, null, null, null);
 
                         break;
                     case BYTE:
                     case SHORT:
                     case CHAR:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(INT_TYPE, expected, true, INT_TYPE, null, null, null);
 
                         break;
                 }
@@ -642,7 +693,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(LONG_TYPE, expected, explicit, LONG_TYPE, null, null, null);
 
                         break;
                     case BYTE:
@@ -650,7 +701,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case CHAR:
                     case INT:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(LONG_TYPE, expected, true, LONG_TYPE, null, null, null);
 
                         break;
                 }
@@ -661,7 +712,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(FLOAT_TYPE, expected, explicit, FLOAT_TYPE, null, null, null);
 
                         break;
                     case BYTE:
@@ -670,7 +721,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case INT:
                     case LONG:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(FLOAT_TYPE, expected, true, FLOAT_TYPE, null, null, null);
 
                         break;
                 }
@@ -678,10 +729,9 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                 break;
             case DOUBLE_OBJ:
                 switch (expected.sort) {
-                    case FLOAT:
                     case DOUBLE:
                         if (internal)
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(DOUBLE_TYPE, expected, explicit, DOUBLE_TYPE, null, null, null);
 
                         break;
                     case BYTE:
@@ -689,8 +739,9 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                     case CHAR:
                     case INT:
                     case LONG:
+                    case FLOAT:
                         if (internal && explicit)
-                            return new Cast(actual, expected, true, true, false, false, false);
+                            return new Cast(DOUBLE_TYPE, expected, true, DOUBLE_TYPE, null, null, null);
 
                         break;
                 }
@@ -699,14 +750,21 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
             case DEF:
                 switch (expected.sort) {
                     case BOOL:
+                        return new Cast(DEF_TYPE, BOOLEAN_OBJ_TYPE, explicit, null, BOOLEAN_TYPE, null, null);
                     case BYTE:
+                        return new Cast(DEF_TYPE, BYTE_OBJ_TYPE, explicit, null, BYTE_TYPE, null, null);
                     case SHORT:
+                        return new Cast(DEF_TYPE, SHORT_OBJ_TYPE, explicit, null, SHORT_TYPE, null, null);
                     case CHAR:
+                        return new Cast(DEF_TYPE, CHAR_OBJ_TYPE, explicit, null, CHAR_TYPE, null, null);
                     case INT:
+                        return new Cast(DEF_TYPE, INT_OBJ_TYPE, explicit, null, INT_TYPE, null, null);
                     case LONG:
+                        return new Cast(DEF_TYPE, LONG_OBJ_TYPE, explicit, null, LONG_TYPE, null, null);
                     case FLOAT:
+                        return new Cast(DEF_TYPE, FLOAT_OBJ_TYPE, explicit, null, FLOAT_TYPE, null, null);
                     case DOUBLE:
-                            return new Cast(actual, expected, explicit, true, false, false, false);
+                            return new Cast(DEF_TYPE, DOUBLE_OBJ_TYPE, explicit, null, DOUBLE_TYPE, null, null);
                 }
 
                 break;
@@ -714,7 +772,7 @@ public static Cast getLegalCast(Location location, Type actual, Type expected, b
                 switch (expected.sort) {
                     case CHAR:
                         if (explicit)
-                            return new Cast(actual, expected, true, false, false, false, false);
+                            return new Cast(STRING_TYPE, CHAR_TYPE, true);
 
                         break;
                 }
@@ -773,15 +831,15 @@ public static Type promoteNumeric(Type from, boolean decimal) {
         final Sort sort = from.sort;
 
         if (sort == Sort.DEF) {
-            return Definition.DEF_TYPE;
+            return DEF_TYPE;
         } else if ((sort == Sort.DOUBLE) && decimal) {
-            return Definition.DOUBLE_TYPE;
+            return DOUBLE_TYPE;
         } else if ((sort == Sort.FLOAT) && decimal) {
-            return  Definition.FLOAT_TYPE;
+            return  FLOAT_TYPE;
         } else if (sort == Sort.LONG) {
-            return Definition.LONG_TYPE;
+            return LONG_TYPE;
         } else if (sort == Sort.INT || sort == Sort.CHAR || sort == Sort.SHORT || sort == Sort.BYTE) {
-            return Definition.INT_TYPE;
+            return INT_TYPE;
         }
 
         return null;
@@ -792,24 +850,24 @@ public static Type promoteNumeric(Type from0, Type from1, boolean decimal) {
         final Sort sort1 = from1.sort;
 
         if (sort0 == Sort.DEF || sort1 == Sort.DEF) {
-            return Definition.DEF_TYPE;
+            return DEF_TYPE;
         }
 
         if (decimal) {
             if (sort0 == Sort.DOUBLE || sort1 == Sort.DOUBLE) {
-                return Definition.DOUBLE_TYPE;
+                return DOUBLE_TYPE;
             } else if (sort0 == Sort.FLOAT || sort1 == Sort.FLOAT) {
-                return Definition.FLOAT_TYPE;
+                return FLOAT_TYPE;
             }
         }
 
         if (sort0 == Sort.LONG || sort1 == Sort.LONG) {
-            return Definition.LONG_TYPE;
+            return LONG_TYPE;
         } else if (sort0 == Sort.INT   || sort1 == Sort.INT   ||
                    sort0 == Sort.CHAR  || sort1 == Sort.CHAR  ||
                    sort0 == Sort.SHORT || sort1 == Sort.SHORT ||
                    sort0 == Sort.BYTE  || sort1 == Sort.BYTE) {
-            return Definition.INT_TYPE;
+            return INT_TYPE;
         }
 
         return null;
@@ -820,7 +878,7 @@ public static Type promoteAdd(final Type from0, final Type from1) {
         final Sort sort1 = from1.sort;
 
         if (sort0 == Sort.STRING || sort1 == Sort.STRING) {
-            return Definition.STRING_TYPE;
+            return STRING_TYPE;
         }
 
         return promoteNumeric(from0, from1, true);
@@ -831,11 +889,11 @@ public static Type promoteXor(final Type from0, final Type from1) {
         final Sort sort1 = from1.sort;
 
         if (sort0 == Sort.DEF || sort1 == Sort.DEF) {
-            return Definition.DEF_TYPE;
+            return DEF_TYPE;
         }
 
         if (sort0.bool || sort1.bool) {
-            return Definition.BOOLEAN_TYPE;
+            return BOOLEAN_TYPE;
         }
 
         return promoteNumeric(from0, from1, false);
@@ -846,12 +904,12 @@ public static Type promoteEquality(final Type from0, final Type from1) {
         final Sort sort1 = from1.sort;
 
         if (sort0 == Sort.DEF || sort1 == Sort.DEF) {
-            return Definition.DEF_TYPE;
+            return DEF_TYPE;
         }
 
         if (sort0.primitive && sort1.primitive) {
             if (sort0.bool && sort1.bool) {
-                return Definition.BOOLEAN_TYPE;
+                return BOOLEAN_TYPE;
             }
 
             if (sort0.numeric && sort1.numeric) {
@@ -859,7 +917,7 @@ public static Type promoteEquality(final Type from0, final Type from1) {
             }
         }
 
-        return Definition.OBJECT_TYPE;
+        return OBJECT_TYPE;
     }
 
     public static Type promoteConditional(final Type from0, final Type from1, final Object const0, final Object const1) {
@@ -871,46 +929,46 @@ public static Type promoteConditional(final Type from0, final Type from1, final
         final Sort sort1 = from1.sort;
 
         if (sort0 == Sort.DEF || sort1 == Sort.DEF) {
-            return Definition.DEF_TYPE;
+            return DEF_TYPE;
         }
 
         if (sort0.primitive && sort1.primitive) {
             if (sort0.bool && sort1.bool) {
-                return Definition.BOOLEAN_TYPE;
+                return BOOLEAN_TYPE;
             }
 
             if (sort0 == Sort.DOUBLE || sort1 == Sort.DOUBLE) {
-                return Definition.DOUBLE_TYPE;
+                return DOUBLE_TYPE;
             } else if (sort0 == Sort.FLOAT || sort1 == Sort.FLOAT) {
-                return Definition.FLOAT_TYPE;
+                return FLOAT_TYPE;
             } else if (sort0 == Sort.LONG || sort1 == Sort.LONG) {
-                return Definition.LONG_TYPE;
+                return LONG_TYPE;
             } else {
                 if (sort0 == Sort.BYTE) {
                     if (sort1 == Sort.BYTE) {
-                        return Definition.BYTE_TYPE;
+                        return BYTE_TYPE;
                     } else if (sort1 == Sort.SHORT) {
                         if (const1 != null) {
                             final short constant = (short)const1;
 
                             if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.SHORT_TYPE;
+                        return SHORT_TYPE;
                     } else if (sort1 == Sort.CHAR) {
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.INT) {
                         if (const1 != null) {
                             final int constant = (int)const1;
 
                             if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     }
                 } else if (sort0 == Sort.SHORT) {
                     if (sort1 == Sort.BYTE) {
@@ -918,43 +976,43 @@ public static Type promoteConditional(final Type from0, final Type from1, final
                             final short constant = (short)const0;
 
                             if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.SHORT_TYPE;
+                        return SHORT_TYPE;
                     } else if (sort1 == Sort.SHORT) {
-                        return Definition.SHORT_TYPE;
+                        return SHORT_TYPE;
                     } else if (sort1 == Sort.CHAR) {
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.INT) {
                         if (const1 != null) {
                             final int constant = (int)const1;
 
                             if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) {
-                                return Definition.SHORT_TYPE;
+                                return SHORT_TYPE;
                             }
                         }
 
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     }
                 } else if (sort0 == Sort.CHAR) {
                     if (sort1 == Sort.BYTE) {
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.SHORT) {
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.CHAR) {
-                        return Definition.CHAR_TYPE;
+                        return CHAR_TYPE;
                     } else if (sort1 == Sort.INT) {
                         if (const1 != null) {
                             final int constant = (int)const1;
 
                             if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     }
                 } else if (sort0 == Sort.INT) {
                     if (sort1 == Sort.BYTE) {
@@ -962,33 +1020,33 @@ public static Type promoteConditional(final Type from0, final Type from1, final
                             final int constant = (int)const0;
 
                             if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.SHORT) {
                         if (const0 != null) {
                             final int constant = (int)const0;
 
                             if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.CHAR) {
                         if (const0 != null) {
                             final int constant = (int)const0;
 
                             if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) {
-                                return Definition.BYTE_TYPE;
+                                return BYTE_TYPE;
                             }
                         }
 
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     } else if (sort1 == Sort.INT) {
-                        return Definition.INT_TYPE;
+                        return INT_TYPE;
                     }
                 }
             }
@@ -998,7 +1056,7 @@ public static Type promoteConditional(final Type from0, final Type from1, final
         //       to calculate the highest upper bound for the two types and return that.
         //       However, for now we just return objectType that may require an extra cast.
 
-        return Definition.OBJECT_TYPE;
+        return OBJECT_TYPE;
     }
 
     private AnalyzerCaster() {}
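Worked reading of the new Cast constructor used throughout getLegalCast above. Following the Definition.Cast field change later in this patch, the positional order is Cast(from, to, explicit, unboxFrom, unboxTo, boxFrom, boxTo), with the four box/unbox slots now carrying the concrete Type to box or unbox (or null) instead of a boolean flag. A sketch, assuming Definition.Cast and the static Definition imports added at the top of this file; the explicit values below are example literals:

    // byte -> Integer at an internal call site: widen byte to int, then box the result (boxTo = INT_TYPE).
    // Old form for comparison: new Cast(actual, Definition.INT_TYPE, explicit, false, false, false, true)
    Cast byteToBoxedInt = new Cast(BYTE_TYPE, INT_TYPE, false, null, null, null, INT_TYPE);

    // def -> long: cast the def value to Long first, then unbox it to the primitive (unboxTo = LONG_TYPE).
    Cast defToLong = new Cast(DEF_TYPE, LONG_OBJ_TYPE, true, null, LONG_TYPE, null, null);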
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java
index b81a9dee24cb1..56c1b2e0ba79f 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java
@@ -43,7 +43,7 @@ final class Compiler {
     /**
      * The maximum number of characters allowed in the script source.
      */
-    static int MAXIMUM_SOURCE_LENGTH = 16384;
+    static final int MAXIMUM_SOURCE_LENGTH = 16384;
 
     /**
      * Define the class with lowest privileges.
@@ -77,39 +77,42 @@ static final class Loader extends SecureClassLoader {
          * Generates a Class object from the generated byte code.
          * @param name The name of the class.
          * @param bytes The generated byte code.
-         * @return A Class object extending {@link Executable}.
+         * @return A Class object extending {@link PainlessScript}.
          */
-        Class<? extends Executable> define(String name, byte[] bytes) {
-            return defineClass(name, bytes, 0, bytes.length, CODESOURCE).asSubclass(Executable.class);
+        Class<? extends PainlessScript> define(String name, byte[] bytes) {
+            return defineClass(name, bytes, 0, bytes.length, CODESOURCE).asSubclass(PainlessScript.class);
         }
     }
 
     /**
      * Runs the two-pass compiler to generate a Painless script.
+     * @param <T> the type of the script
      * @param loader The ClassLoader used to define the script.
+     * @param iface Interface the compiled script should implement
      * @param name The name of the script.
      * @param source The source code for the script.
      * @param settings The CompilerSettings to be used during the compilation.
-     * @return An {@link Executable} Painless script.
+     * @return An executable script that implements both {@code <T>} and is a subclass of {@link PainlessScript}
      */
-    static Executable compile(Loader loader, String name, String source, CompilerSettings settings) {
+    static <T> T compile(Loader loader, Class<T> iface, String name, String source, CompilerSettings settings) {
         if (source.length() > MAXIMUM_SOURCE_LENGTH) {
             throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH +
                 " characters.  The passed in script is " + source.length() + " characters.  Consider using a" +
                 " plugin if a script longer than this length is a requirement.");
         }
+        ScriptInterface scriptInterface = new ScriptInterface(iface);
 
-        SSource root = Walker.buildPainlessTree(name, source, settings, null);
+        SSource root = Walker.buildPainlessTree(scriptInterface, name, source, settings, null);
 
         root.analyze();
         root.write();
 
         try {
-            Class<? extends Executable> clazz = loader.define(CLASS_NAME, root.getBytes());
-            java.lang.reflect.Constructor<? extends Executable> constructor =
+            Class<? extends PainlessScript> clazz = loader.define(CLASS_NAME, root.getBytes());
+            java.lang.reflect.Constructor<? extends PainlessScript> constructor =
                     clazz.getConstructor(String.class, String.class, BitSet.class);
 
-            return constructor.newInstance(name, source, root.getStatements());
+            return iface.cast(constructor.newInstance(name, source, root.getStatements()));
         } catch (Exception exception) { // Catch everything to let the user know this is something caused internally.
             throw new IllegalStateException("An internal error occurred attempting to define the script [" + name + "].", exception);
         }
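
For reference, a sketch of how a caller inside the package (such as the script engine service changed later in this patch) might invoke the new generic entry point; the loader and compilerSettings variables are assumed to already be in scope:

    GenericElasticsearchScript script = Compiler.compile(loader, GenericElasticsearchScript.class,
            "my_script", "params.a + params.b", compilerSettings);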
@@ -117,18 +120,20 @@ static Executable compile(Loader loader, String name, String source, CompilerSet
 
     /**
      * Runs the two-pass compiler to generate a Painless script.  (Used by the debugger.)
+     * @param iface Interface the compiled script should implement
      * @param source The source code for the script.
      * @param settings The CompilerSettings to be used during the compilation.
      * @return The bytes for compilation.
      */
-    static byte[] compile(String name, String source, CompilerSettings settings, Printer debugStream) {
+    static byte[] compile(Class&lt;?&gt; iface, String name, String source, CompilerSettings settings, Printer debugStream) {
         if (source.length() > MAXIMUM_SOURCE_LENGTH) {
             throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH +
                 " characters.  The passed in script is " + source.length() + " characters.  Consider using a" +
                 " plugin if a script longer than this length is a requirement.");
         }
+        ScriptInterface scriptInterface = new ScriptInterface(iface);
 
-        SSource root = Walker.buildPainlessTree(name, source, settings, debugStream);
+        SSource root = Walker.buildPainlessTree(scriptInterface, name, source, settings, debugStream);
 
         root.analyze();
         root.write();
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java
index 9f03540c2d869..16f0339677e3a 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java
@@ -87,6 +87,7 @@ public final class Definition {
     public static final Type CHAR_OBJ_TYPE = getType("Character");
     public static final Type OBJECT_TYPE = getType("Object");
     public static final Type DEF_TYPE = getType("def");
+    public static final Type NUMBER_TYPE = getType("Number");
     public static final Type STRING_TYPE = getType("String");
     public static final Type EXCEPTION_TYPE = getType("Exception");
     public static final Type PATTERN_TYPE = getType("Pattern");
@@ -434,23 +435,23 @@ public static class Cast {
         public final Type from;
         public final Type to;
         public final boolean explicit;
-        public final boolean unboxFrom;
-        public final boolean unboxTo;
-        public final boolean boxFrom;
-        public final boolean boxTo;
+        public final Type unboxFrom;
+        public final Type unboxTo;
+        public final Type boxFrom;
+        public final Type boxTo;
 
         public Cast(final Type from, final Type to, final boolean explicit) {
             this.from = from;
             this.to = to;
             this.explicit = explicit;
-            this.unboxFrom = false;
-            this.unboxTo = false;
-            this.boxFrom = false;
-            this.boxTo = false;
+            this.unboxFrom = null;
+            this.unboxTo = null;
+            this.boxFrom = null;
+            this.boxTo = null;
         }
 
         public Cast(final Type from, final Type to, final boolean explicit,
-                    final boolean unboxFrom, final boolean unboxTo, final boolean boxFrom, final boolean boxTo) {
+                    final Type unboxFrom, final Type unboxTo, final Type boxFrom, final Type boxTo) {
             this.from = from;
             this.to = to;
             this.explicit = explicit;
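
A minimal sketch of how casts can be described with the widened constructor; which Type actually goes into each box/unbox slot is decided by AnalyzerCaster, so the concrete values below are illustrative assumptions only:

    // simple explicit cast, no boxing or unboxing involved
    Definition.Cast plain = new Definition.Cast(Definition.LONG_TYPE, Definition.INT_TYPE, true);
    // a cast that additionally records an unboxing step
    // (slot order: from, to, explicit, unboxFrom, unboxTo, boxFrom, boxTo)
    Definition.Cast unboxing = new Definition.Cast(Definition.INT_OBJ_TYPE, Definition.LONG_TYPE, false,
            Definition.INT_TYPE, null, null, null);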
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Executable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Executable.java
deleted file mode 100644
index c02c66ffc1022..0000000000000
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Executable.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.painless;
-
-import org.apache.lucene.search.Scorer;
-import org.elasticsearch.search.lookup.LeafDocLookup;
-
-import java.util.BitSet;
-import java.util.Map;
-
-/**
- * The superclass used to build all Painless scripts on top of.
- */
-public abstract class Executable {
-
-    private final String name;
-    private final String source;
-    private final BitSet statements;
-
-    public Executable(String name, String source, BitSet statements) {
-        this.name = name;
-        this.source = source;
-        this.statements = statements;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public String getSource() {
-        return source;
-    }
-
-    /**
-     * Finds the start of the first statement boundary that is
-     * on or before {@code offset}. If one is not found, {@code -1}
-     * is returned.
-     */
-    public int getPreviousStatement(int offset) {
-        return statements.previousSetBit(offset);
-    }
-
-    /**
-     * Finds the start of the first statement boundary that is
-     * after {@code offset}. If one is not found, {@code -1}
-     * is returned.
-     */
-    public int getNextStatement(int offset) {
-        return statements.nextSetBit(offset+1);
-    }
-
-    public abstract Object execute(Map&lt;String, Object&gt; params, Scorer scorer, LeafDocLookup doc, Object value);
-}
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/NeedsScore.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/GenericElasticsearchScript.java
similarity index 63%
rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/NeedsScore.java
rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/GenericElasticsearchScript.java
index 6812e1019b22d..69f5462c22813 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/NeedsScore.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/GenericElasticsearchScript.java
@@ -16,10 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.painless;
 
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+
+import java.util.Map;
+
 /**
- * Marker interface that a generated {@link Executable} uses the {@code _score} value
+ * Generic script interface that Painless implements for all Elasticsearch scripts.
  */
-public interface NeedsScore {
+public interface GenericElasticsearchScript {
+    String[] ARGUMENTS = new String[] {"params", "_score", "doc", "_value", "ctx"};
+    Object execute(Map&lt;String, Object&gt; params, double _score, Map&lt;String, ScriptDocValues&lt;?&gt;&gt; doc, Object _value, Map&lt;?, ?&gt; ctx);
+
+    boolean uses$_score();
+    boolean uses$ctx();
 }
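
The ARGUMENTS constant and the uses$ methods above follow the conventions enforced by ScriptInterface (added later in this patch): exactly one execute method, an ARGUMENTS String[] naming its parameters, and optional uses$argName methods that take no arguments and return boolean. A hypothetical custom interface obeying the same rules could look like this; the name and members are invented purely for illustration:

    public interface HypotheticalFactorScript {
        // names of the execute arguments, in declaration order, become script variables
        String[] ARGUMENTS = new String[] {"params", "factor"};
        double execute(Map&lt;String, Object&gt; params, double factor);
        // optional: the generated script reports whether 'factor' is actually used
        boolean uses$factor();
    }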
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java
index 6df90a5ef08c9..9bbe9d9def39a 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.painless.Definition.Method;
 import org.elasticsearch.painless.Definition.MethodKey;
 import org.elasticsearch.painless.Definition.Type;
+import org.elasticsearch.painless.ScriptInterface.MethodArgument;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -37,36 +38,14 @@
  */
 public final class Locals {
 
-    /** Reserved word: params map parameter */
-    public static final String PARAMS = "params";
-    /** Reserved word: Lucene scorer parameter */
-    public static final String SCORER = "#scorer";
-    /** Reserved word: _value variable for aggregations */
-    public static final String VALUE  = "_value";
-    /** Reserved word: _score variable for search scripts */
-    public static final String SCORE  = "_score";
-    /** Reserved word: ctx map for executable scripts */
-    public static final String CTX    = "ctx";
     /** Reserved word: loop counter */
     public static final String LOOP   = "#loop";
     /** Reserved word: unused */
     public static final String THIS   = "#this";
-    /** Reserved word: unused */
-    public static final String DOC    = "doc";
-
-    /** Map of always reserved keywords for the main scope */
-    public static final Set&lt;String&gt; MAIN_KEYWORDS = Collections.unmodifiableSet(new HashSet&lt;&gt;(Arrays.asList(
-        THIS,PARAMS,SCORER,DOC,VALUE,SCORE,CTX,LOOP
-    )));
 
-    /** Map of always reserved keywords for a function scope */
-    public static final Set&lt;String&gt; FUNCTION_KEYWORDS = Collections.unmodifiableSet(new HashSet&lt;&gt;(Arrays.asList(
-        THIS,LOOP
-    )));
-
-    /** Map of always reserved keywords for a lambda scope */
-    public static final Set&lt;String&gt; LAMBDA_KEYWORDS = Collections.unmodifiableSet(new HashSet&lt;&gt;(Arrays.asList(
-        THIS,LOOP
+    /** Set of reserved keywords. */
+    public static final Set&lt;String&gt; KEYWORDS = Collections.unmodifiableSet(new HashSet&lt;&gt;(Arrays.asList(
+        THIS, LOOP
     )));
 
     /** Creates a new local variable scope (e.g. loop) inside the current scope */
@@ -81,7 +60,7 @@ public static Locals newLocalScope(Locals currentScope) {
      */
     public static Locals newLambdaScope(Locals programScope, Type returnType, List&lt;Parameter&gt; parameters,
                                         int captureCount, int maxLoopCounter) {
-        Locals locals = new Locals(programScope, returnType, LAMBDA_KEYWORDS);
+        Locals locals = new Locals(programScope, returnType, KEYWORDS);
         for (int i = 0; i < parameters.size(); i++) {
             Parameter parameter = parameters.get(i);
             // TODO: allow non-captures to be r/w:
@@ -100,7 +79,7 @@ public static Locals newLambdaScope(Locals programScope, Type returnType, List

parameters, int maxLoopCounter) { - Locals locals = new Locals(programScope, returnType, FUNCTION_KEYWORDS); + Locals locals = new Locals(programScope, returnType, KEYWORDS); for (Parameter parameter : parameters) { locals.addVariable(parameter.location, parameter.type, parameter.name, false); } @@ -112,33 +91,14 @@ public static Locals newFunctionScope(Locals programScope, Type returnType, List } /** Creates a new main method scope */ - public static Locals newMainMethodScope(Locals programScope, boolean usesScore, boolean usesCtx, int maxLoopCounter) { - Locals locals = new Locals(programScope, Definition.OBJECT_TYPE, MAIN_KEYWORDS); - // This reference. Internal use only. + public static Locals newMainMethodScope(ScriptInterface scriptInterface, Locals programScope, int maxLoopCounter) { + Locals locals = new Locals(programScope, scriptInterface.getExecuteMethodReturnType(), KEYWORDS); + // This reference. Internal use only. locals.defineVariable(null, Definition.getType("Object"), THIS, true); - // Input map of variables passed to the script. - locals.defineVariable(null, Definition.getType("Map"), PARAMS, true); - - // Scorer parameter passed to the script. Internal use only. - locals.defineVariable(null, Definition.DEF_TYPE, SCORER, true); - - // Doc parameter passed to the script. TODO: Currently working as a Map, we can do better? - locals.defineVariable(null, Definition.getType("Map"), DOC, true); - - // Aggregation _value parameter passed to the script. - locals.defineVariable(null, Definition.DEF_TYPE, VALUE, true); - - // Shortcut variables. - - // Document's score as a read-only double. - if (usesScore) { - locals.defineVariable(null, Definition.DOUBLE_TYPE, SCORE, true); - } - - // The ctx map set by executable scripts as a read-only map. - if (usesCtx) { - locals.defineVariable(null, Definition.getType("Map"), CTX, true); + // Method arguments + for (MethodArgument arg : scriptInterface.getExecuteArguments()) { + locals.defineVariable(null, arg.getType(), arg.getName(), true); } // Loop counter to catch infinite loops. Internal use only. 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java index eb1df00edd2a8..4a4803844a4c4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java @@ -66,7 +66,7 @@ public RuntimeException createError(RuntimeException exception) { return exception; } - // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we dont know how large it is in bytes, so be safe + // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we don't know how large it is in bytes, so be safe private static final int MAX_NAME_LENGTH = 256; /** Computes the file name (mostly important for stacktraces) */ diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index 7e56bf49156bf..ac902ee134e13 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -131,51 +131,48 @@ public void writeLoopCounter(int slot, int count, Location location) { public void writeCast(final Cast cast) { if (cast != null) { - final Type from = cast.from; - final Type to = cast.to; - - if (from.sort == Sort.CHAR && to.sort == Sort.STRING) { + if (cast.from.sort == Sort.CHAR && cast.to.sort == Sort.STRING) { invokeStatic(UTILITY_TYPE, CHAR_TO_STRING); - } else if (from.sort == Sort.STRING && to.sort == Sort.CHAR) { + } else if (cast.from.sort == Sort.STRING && cast.to.sort == Sort.CHAR) { invokeStatic(UTILITY_TYPE, STRING_TO_CHAR); - } else if (cast.unboxFrom) { - if (from.sort == Sort.DEF) { + } else if (cast.unboxFrom != null) { + unbox(cast.unboxFrom.type); + writeCast(cast.from, cast.to); + } else if (cast.unboxTo != null) { + if (cast.from.sort == Sort.DEF) { if (cast.explicit) { - if (to.sort == Sort.BOOL) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); - else if (to.sort == Sort.BYTE) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_EXPLICIT); - else if (to.sort == Sort.SHORT) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_EXPLICIT); - else if (to.sort == Sort.CHAR) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_EXPLICIT); - else if (to.sort == Sort.INT) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_EXPLICIT); - else if (to.sort == Sort.LONG) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_EXPLICIT); - else if (to.sort == Sort.FLOAT) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_EXPLICIT); - else if (to.sort == Sort.DOUBLE) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_EXPLICIT); + if (cast.to.sort == Sort.BOOL_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); + else if (cast.to.sort == Sort.BYTE_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_EXPLICIT); + else if (cast.to.sort == Sort.SHORT_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_EXPLICIT); + else if (cast.to.sort == Sort.CHAR_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_EXPLICIT); + else if (cast.to.sort == Sort.INT_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_EXPLICIT); + else if (cast.to.sort == Sort.LONG_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_EXPLICIT); + else if (cast.to.sort == Sort.FLOAT_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_EXPLICIT); + else if (cast.to.sort == Sort.DOUBLE_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_EXPLICIT); else throw new IllegalStateException("Illegal tree structure."); } 
else { - if (to.sort == Sort.BOOL) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); - else if (to.sort == Sort.BYTE) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_IMPLICIT); - else if (to.sort == Sort.SHORT) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_IMPLICIT); - else if (to.sort == Sort.CHAR) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_IMPLICIT); - else if (to.sort == Sort.INT) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_IMPLICIT); - else if (to.sort == Sort.LONG) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_IMPLICIT); - else if (to.sort == Sort.FLOAT) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_IMPLICIT); - else if (to.sort == Sort.DOUBLE) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_IMPLICIT); + if (cast.to.sort == Sort.BOOL_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); + else if (cast.to.sort == Sort.BYTE_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_IMPLICIT); + else if (cast.to.sort == Sort.SHORT_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_IMPLICIT); + else if (cast.to.sort == Sort.CHAR_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_IMPLICIT); + else if (cast.to.sort == Sort.INT_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_IMPLICIT); + else if (cast.to.sort == Sort.LONG_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_IMPLICIT); + else if (cast.to.sort == Sort.FLOAT_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_IMPLICIT); + else if (cast.to.sort == Sort.DOUBLE_OBJ) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_IMPLICIT); else throw new IllegalStateException("Illegal tree structure."); } } else { - unbox(from.type); - writeCast(from, to); + writeCast(cast.from, cast.to); + unbox(cast.unboxTo.type); } - } else if (cast.unboxTo) { - writeCast(from, to); - unbox(to.type); - } else if (cast.boxFrom) { - box(from.type); - writeCast(from, to); - } else if (cast.boxTo) { - writeCast(from, to); - box(to.type); + } else if (cast.boxFrom != null) { + box(cast.boxFrom.type); + writeCast(cast.from, cast.to); + } else if (cast.boxTo != null) { + writeCast(cast.from, cast.to); + box(cast.boxTo.type); } else { - writeCast(from, to); + writeCast(cast.from, cast.to); } } } @@ -269,19 +266,19 @@ public void writeToStrings() { } /** Writes a dynamic binary instruction: returnType, lhs, and rhs can be different */ - public void writeDynamicBinaryInstruction(Location location, Type returnType, Type lhs, Type rhs, + public void writeDynamicBinaryInstruction(Location location, Type returnType, Type lhs, Type rhs, Operation operation, int flags) { org.objectweb.asm.Type methodType = org.objectweb.asm.Type.getMethodType(returnType.type, lhs.type, rhs.type); - + switch (operation) { case MUL: - invokeDefCall("mul", methodType, DefBootstrap.BINARY_OPERATOR, flags); + invokeDefCall("mul", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; case DIV: - invokeDefCall("div", methodType, DefBootstrap.BINARY_OPERATOR, flags); + invokeDefCall("div", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; case REM: - invokeDefCall("rem", methodType, DefBootstrap.BINARY_OPERATOR, flags); + invokeDefCall("rem", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; case ADD: // if either side is primitive, then the + operator should always throw NPE on null, @@ -294,31 +291,31 @@ public void writeDynamicBinaryInstruction(Location location, Type returnType, Ty invokeDefCall("add", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; case SUB: - invokeDefCall("sub", methodType, DefBootstrap.BINARY_OPERATOR, flags); + invokeDefCall("sub", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; case LSH: invokeDefCall("lsh", methodType, 
DefBootstrap.SHIFT_OPERATOR, flags); break; case USH: - invokeDefCall("ush", methodType, DefBootstrap.SHIFT_OPERATOR, flags); + invokeDefCall("ush", methodType, DefBootstrap.SHIFT_OPERATOR, flags); break; case RSH: - invokeDefCall("rsh", methodType, DefBootstrap.SHIFT_OPERATOR, flags); + invokeDefCall("rsh", methodType, DefBootstrap.SHIFT_OPERATOR, flags); break; - case BWAND: + case BWAND: invokeDefCall("and", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; - case XOR: + case XOR: invokeDefCall("xor", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; - case BWOR: + case BWOR: invokeDefCall("or", methodType, DefBootstrap.BINARY_OPERATOR, flags); break; default: throw location.createError(new IllegalStateException("Illegal tree structure.")); } } - + /** Writes a static binary instruction */ public void writeBinaryInstruction(Location location, Type type, Operation operation) { final Sort sort = type.sort; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java index 75a1d2392cd34..fff692fdb9f3e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java @@ -46,7 +46,7 @@ Object getObjectToExplain() { /** * Headers to be added to the {@link ScriptException} for structured rendering. */ - Map> getHeaders() { + public Map> getHeaders() { Map> headers = new TreeMap<>(); String toString = "null"; String javaClassName = null; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java new file mode 100644 index 0000000000000..b4e7c157f1fe2 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.script.ScriptException; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.Map; + +/** + * Abstract superclass on top of which all Painless scripts are built. + */ +public abstract class PainlessScript { + /** + * Name of the script set at compile time. + */ + private final String name; + /** + * Source of the script. + */ + private final String source; + /** + * Character number of the start of each statement. 
+ */ + private final BitSet statements; + + protected PainlessScript(String name, String source, BitSet statements) { + this.name = name; + this.source = source; + this.statements = statements; + } + + /** + * Adds stack trace and other useful information to exceptions thrown + * from a Painless script. + * @param t The throwable to build an exception around. + * @return The generated ScriptException. + */ + protected final ScriptException convertToScriptException(Throwable t, Map> extraMetadata) { + // create a script stack: this is just the script portion + List scriptStack = new ArrayList<>(); + for (StackTraceElement element : t.getStackTrace()) { + if (WriterConstants.CLASS_NAME.equals(element.getClassName())) { + // found the script portion + int offset = element.getLineNumber(); + if (offset == -1) { + scriptStack.add("<<< unknown portion of script >>>"); + } else { + offset--; // offset is 1 based, line numbers must be! + int startOffset = getPreviousStatement(offset); + if (startOffset == -1) { + assert false; // should never happen unless we hit exc in ctor prologue... + startOffset = 0; + } + int endOffset = getNextStatement(startOffset); + if (endOffset == -1) { + endOffset = source.length(); + } + // TODO: if this is still too long, truncate and use ellipses + String snippet = source.substring(startOffset, endOffset); + scriptStack.add(snippet); + StringBuilder pointer = new StringBuilder(); + for (int i = startOffset; i < offset; i++) { + pointer.append(' '); + } + pointer.append("^---- HERE"); + scriptStack.add(pointer.toString()); + } + break; + // but filter our own internal stacks (e.g. indy bootstrap) + } else if (!shouldFilter(element)) { + scriptStack.add(element.toString()); + } + } + // build a name for the script: + final String name; + if (PainlessScriptEngineService.INLINE_NAME.equals(this.name)) { + name = source; + } else { + name = this.name; + } + ScriptException scriptException = new ScriptException("runtime error", t, scriptStack, name, PainlessScriptEngineService.NAME); + for (Map.Entry> entry : extraMetadata.entrySet()) { + scriptException.addMetadata(entry.getKey(), entry.getValue()); + } + return scriptException; + } + + /** returns true for methods that are part of the runtime */ + private static boolean shouldFilter(StackTraceElement element) { + return element.getClassName().startsWith("org.elasticsearch.painless.") || + element.getClassName().startsWith("java.lang.invoke.") || + element.getClassName().startsWith("sun.invoke."); + } + + /** + * Finds the start of the first statement boundary that is on or before {@code offset}. If one is not found, {@code -1} is returned. + */ + private int getPreviousStatement(int offset) { + return statements.previousSetBit(offset); + } + + /** + * Finds the start of the first statement boundary that is after {@code offset}. If one is not found, {@code -1} is returned. 
+ */ + private int getNextStatement(int offset) { + return statements.nextSetBit(offset + 1); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java index 4d20a20bd6663..a8dc045674a49 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java @@ -109,6 +109,10 @@ public String getExtension() { @Override public Object compile(String scriptName, final String scriptSource, final Map params) { + return compile(GenericElasticsearchScript.class, scriptName, scriptSource, params); + } + + T compile(Class iface, String scriptName, final String scriptSource, final Map params) { final CompilerSettings compilerSettings; if (params.isEmpty()) { @@ -161,10 +165,11 @@ public Loader run() { try { // Drop all permissions to actually compile the code itself. - return AccessController.doPrivileged(new PrivilegedAction() { + return AccessController.doPrivileged(new PrivilegedAction() { @Override - public Executable run() { - return Compiler.compile(loader, scriptName == null ? INLINE_NAME : scriptName, scriptSource, compilerSettings); + public T run() { + String name = scriptName == null ? INLINE_NAME : scriptName; + return Compiler.compile(loader, iface, name, scriptSource, compilerSettings); } }, COMPILATION_CONTEXT); // Note that it is safe to catch any of the following errors since Painless is stateless. @@ -181,7 +186,7 @@ public Executable run() { */ @Override public ExecutableScript executable(final CompiledScript compiledScript, final Map vars) { - return new ScriptImpl((Executable)compiledScript.compiled(), vars, null); + return new ScriptImpl((GenericElasticsearchScript) compiledScript.compiled(), vars, null); } /** @@ -201,7 +206,7 @@ public SearchScript search(final CompiledScript compiledScript, final SearchLook */ @Override public LeafSearchScript getLeafSearchScript(final LeafReaderContext context) throws IOException { - return new ScriptImpl((Executable)compiledScript.compiled(), vars, lookup.getLeafSearchLookup(context)); + return new ScriptImpl((GenericElasticsearchScript) compiledScript.compiled(), vars, lookup.getLeafSearchLookup(context)); } /** @@ -209,7 +214,7 @@ public LeafSearchScript getLeafSearchScript(final LeafReaderContext context) thr */ @Override public boolean needsScores() { - return compiledScript.compiled() instanceof NeedsScore; + return ((GenericElasticsearchScript) compiledScript.compiled()).uses$_score(); } }; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java index 7af6a65d25132..c07a340838660 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java @@ -20,18 +20,16 @@ package org.elasticsearch.painless; import org.apache.lucene.search.Scorer; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; -import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.lookup.LeafDocLookup; import org.elasticsearch.search.lookup.LeafSearchLookup; -import java.util.ArrayList; +import java.io.IOException; import 
java.util.HashMap; -import java.util.List; import java.util.Map; - -import static java.util.Collections.emptyMap; +import java.util.function.Function; /** * ScriptImpl can be used as either an {@link ExecutableScript} or a {@link LeafSearchScript} @@ -40,9 +38,9 @@ final class ScriptImpl implements ExecutableScript, LeafSearchScript { /** - * The Painless Executable script that can be run. + * The Painless script that can be run. */ - private final Executable executable; + private final GenericElasticsearchScript script; /** * A map that can be used to access input parameters at run-time. @@ -59,6 +57,16 @@ final class ScriptImpl implements ExecutableScript, LeafSearchScript { */ private final LeafDocLookup doc; + /** + * Looks up the {@code _score} from {@link #scorer} if {@code _score} is used, otherwise returns {@code 0.0}. + */ + private final ScoreLookup scoreLookup; + + /** + * Looks up the {@code ctx} from the {@link #variables} if {@code ctx} is used, otherwise return {@code null}. + */ + private final Function, Map> ctxLookup; + /** * Current scorer being used * @see #setScorer(Scorer) @@ -73,12 +81,12 @@ final class ScriptImpl implements ExecutableScript, LeafSearchScript { /** * Creates a ScriptImpl for the a previously compiled Painless script. - * @param executable The previously compiled Painless script. + * @param script The previously compiled Painless script. * @param vars The initial variables to run the script with. * @param lookup The lookup to allow search fields to be available if this is run as a search script. */ - ScriptImpl(final Executable executable, final Map vars, final LeafSearchLookup lookup) { - this.executable = executable; + ScriptImpl(final GenericElasticsearchScript script, final Map vars, final LeafSearchLookup lookup) { + this.script = script; this.lookup = lookup; this.variables = new HashMap<>(); @@ -92,6 +100,9 @@ final class ScriptImpl implements ExecutableScript, LeafSearchScript { } else { doc = null; } + + scoreLookup = script.uses$_score() ? ScriptImpl::getScore : scorer -> 0.0; + ctxLookup = script.uses$ctx() ? variables -> (Map) variables.get("ctx") : variables -> null; } /** @@ -119,77 +130,7 @@ public void setNextAggregationValue(Object value) { */ @Override public Object run() { - try { - return executable.execute(variables, scorer, doc, aggregationValue); - } catch (PainlessExplainError e) { - throw convertToScriptException(e, e.getHeaders()); - // Note that it is safe to catch any of the following errors since Painless is stateless. - } catch (PainlessError | BootstrapMethodError | OutOfMemoryError | StackOverflowError | Exception e) { - throw convertToScriptException(e, emptyMap()); - } - } - - /** - * Adds stack trace and other useful information to exceptions thrown - * from a Painless script. - * @param t The throwable to build an exception around. - * @return The generated ScriptException. - */ - private ScriptException convertToScriptException(Throwable t, Map> metadata) { - // create a script stack: this is just the script portion - List scriptStack = new ArrayList<>(); - for (StackTraceElement element : t.getStackTrace()) { - if (WriterConstants.CLASS_NAME.equals(element.getClassName())) { - // found the script portion - int offset = element.getLineNumber(); - if (offset == -1) { - scriptStack.add("<<< unknown portion of script >>>"); - } else { - offset--; // offset is 1 based, line numbers must be! 
- int startOffset = executable.getPreviousStatement(offset); - if (startOffset == -1) { - assert false; // should never happen unless we hit exc in ctor prologue... - startOffset = 0; - } - int endOffset = executable.getNextStatement(startOffset); - if (endOffset == -1) { - endOffset = executable.getSource().length(); - } - // TODO: if this is still too long, truncate and use ellipses - String snippet = executable.getSource().substring(startOffset, endOffset); - scriptStack.add(snippet); - StringBuilder pointer = new StringBuilder(); - for (int i = startOffset; i < offset; i++) { - pointer.append(' '); - } - pointer.append("^---- HERE"); - scriptStack.add(pointer.toString()); - } - break; - // but filter our own internal stacks (e.g. indy bootstrap) - } else if (!shouldFilter(element)) { - scriptStack.add(element.toString()); - } - } - // build a name for the script: - final String name; - if (PainlessScriptEngineService.INLINE_NAME.equals(executable.getName())) { - name = executable.getSource(); - } else { - name = executable.getName(); - } - ScriptException scriptException = new ScriptException("runtime error", t, scriptStack, name, PainlessScriptEngineService.NAME); - for (Map.Entry> entry : metadata.entrySet()) { - scriptException.addMetadata(entry.getKey(), entry.getValue()); - } - return scriptException; - } - - /** returns true for methods that are part of the runtime */ - private static boolean shouldFilter(StackTraceElement element) { - return element.getClassName().startsWith("org.elasticsearch.painless.") || - element.getClassName().startsWith("java.lang.invoke.") || - element.getClassName().startsWith("sun.invoke."); + return script.execute(variables, scoreLookup.apply(scorer), doc, aggregationValue, ctxLookup.apply(variables)); } /** @@ -240,4 +181,16 @@ public void setSource(final Map source) { lookup.source().setSource(source); } } + + private static double getScore(Scorer scorer) { + try { + return scorer.score(); + } catch (IOException e) { + throw new ElasticsearchException("couldn't lookup score", e); + } + } + + interface ScoreLookup { + double apply(Scorer scorer); + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptInterface.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptInterface.java new file mode 100644 index 0000000000000..b8ab32d1c6d88 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptInterface.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import java.lang.invoke.MethodType; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +import static java.util.Collections.unmodifiableList; +import static org.elasticsearch.painless.WriterConstants.USES_PARAMETER_METHOD_TYPE; + +/** + * Information about the interface being implemented by the painless script. + */ +public class ScriptInterface { + private final Class iface; + private final org.objectweb.asm.commons.Method executeMethod; + private final Definition.Type executeMethodReturnType; + private final List executeArguments; + private final List usesMethods; + + public ScriptInterface(Class iface) { + this.iface = iface; + + // Find the main method and the uses$argName methods + java.lang.reflect.Method executeMethod = null; + List usesMethods = new ArrayList<>(); + for (java.lang.reflect.Method m : iface.getMethods()) { + if (m.isDefault()) { + continue; + } + if (m.getName().equals("execute")) { + if (executeMethod == null) { + executeMethod = m; + } else { + throw new IllegalArgumentException( + "Painless can only implement interfaces that have a single method named [execute] but [" + iface.getName() + + "] has more than one."); + } + continue; + } + if (m.getName().startsWith("uses$")) { + if (false == m.getReturnType().equals(boolean.class)) { + throw new IllegalArgumentException("Painless can only implement uses$ methods that return boolean but [" + + iface.getName() + "#" + m.getName() + "] returns [" + m.getReturnType().getName() + "]."); + } + if (m.getParameterTypes().length > 0) { + throw new IllegalArgumentException("Painless can only implement uses$ methods that do not take parameters but [" + + iface.getName() + "#" + m.getName() + "] does."); + } + usesMethods.add(new org.objectweb.asm.commons.Method(m.getName(), USES_PARAMETER_METHOD_TYPE.toMethodDescriptorString())); + continue; + } + throw new IllegalArgumentException("Painless can only implement methods named [execute] and [uses$argName] but [" + + iface.getName() + "] contains a method named [" + m.getName() + "]"); + } + MethodType methodType = MethodType.methodType(executeMethod.getReturnType(), executeMethod.getParameterTypes()); + this.executeMethod = new org.objectweb.asm.commons.Method(executeMethod.getName(), methodType.toMethodDescriptorString()); + executeMethodReturnType = definitionTypeForClass(executeMethod.getReturnType(), + componentType -> "Painless can only implement execute methods returning a whitelisted type but [" + iface.getName() + + "#execute] returns [" + componentType.getName() + "] which isn't whitelisted."); + + // Look up the argument names + Set argumentNames = new LinkedHashSet<>(); + List arguments = new ArrayList<>(); + String[] argumentNamesConstant = readArgumentNamesConstant(iface); + Class[] types = executeMethod.getParameterTypes(); + if (argumentNamesConstant.length != types.length) { + throw new IllegalArgumentException("[" + iface.getName() + "#ARGUMENTS] has length [2] but [" + + iface.getName() + "#execute] takes [1] argument."); + } + for (int arg = 0; arg < types.length; arg++) { + arguments.add(methodArgument(types[arg], argumentNamesConstant[arg])); + argumentNames.add(argumentNamesConstant[arg]); + } + this.executeArguments = unmodifiableList(arguments); + + // Validate that the uses$argName methods reference argument names + for (org.objectweb.asm.commons.Method usesMethod : usesMethods) { + 
if (false == argumentNames.contains(usesMethod.getName().substring("uses$".length()))) { + throw new IllegalArgumentException("Painless can only implement uses$ methods that match a parameter name but [" + + iface.getName() + "#" + usesMethod.getName() + "] doesn't match any of " + argumentNames + "."); + } + } + this.usesMethods = unmodifiableList(usesMethods); + } + + /** + * The interface that the Painless script should implement. + */ + public Class getInterface() { + return iface; + } + + /** + * An asm method descriptor for the {@code execute} method. + */ + public org.objectweb.asm.commons.Method getExecuteMethod() { + return executeMethod; + } + + /** + * The Painless {@link Definition.Type} or the return type of the {@code execute} method. This is used to generate the appropriate + * return bytecode. + */ + public Definition.Type getExecuteMethodReturnType() { + return executeMethodReturnType; + } + + /** + * Painless {@link Definition.Type}s and names of the arguments to the {@code execute} method. The names are exposed to the Painless + * script. + */ + public List getExecuteArguments() { + return executeArguments; + } + + /** + * The {@code uses$varName} methods that must be implemented by Painless to complete implementing the interface. + */ + public List getUsesMethods() { + return usesMethods; + } + + /** + * Painless {@link Definition.Type}s and name of the argument to the {@code execute} method. + */ + public static class MethodArgument { + private final Definition.Type type; + private final String name; + + public MethodArgument(Definition.Type type, String name) { + this.type = type; + this.name = name; + } + + public Definition.Type getType() { + return type; + } + + public String getName() { + return name; + } + } + + private static MethodArgument methodArgument(Class type, String argName) { + Definition.Type defType = definitionTypeForClass(type, componentType -> "[" + argName + "] is of unknown type [" + + componentType.getName() + ". 
Painless interfaces can only accept arguments that are of whitelisted types."); + return new MethodArgument(defType, argName); + } + + private static Definition.Type definitionTypeForClass(Class type, Function, String> unknownErrorMessageSource) { + int dimensions = 0; + Class componentType = type; + while (componentType.isArray()) { + dimensions++; + componentType = componentType.getComponentType(); + } + Definition.Struct struct; + if (componentType.equals(Object.class)) { + struct = Definition.DEF_TYPE.struct; + } else { + Definition.RuntimeClass runtimeClass = Definition.getRuntimeClass(componentType); + if (runtimeClass == null) { + throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType)); + } + struct = runtimeClass.getStruct(); + } + return Definition.getType(struct, dimensions); + } + + private static String[] readArgumentNamesConstant(Class iface) { + Field argumentNamesField; + try { + argumentNamesField = iface.getField("ARGUMENTS"); + } catch (NoSuchFieldException e) { + throw new IllegalArgumentException("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + + "names of the method arguments but [" + iface.getName() + "] doesn't have one.", e); + } + if (false == argumentNamesField.getType().equals(String[].class)) { + throw new IllegalArgumentException("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + + "names of the method arguments but [" + iface.getName() + "] doesn't have one."); + } + try { + return (String[]) argumentNamesField.get(null); + } catch (IllegalArgumentException | IllegalAccessException e) { + throw new IllegalArgumentException("Error trying to read [" + iface.getName() + "#ARGUMENTS]", e); + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index 32019909a6057..6ae637e59b1e5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -19,9 +19,8 @@ package org.elasticsearch.painless; -import org.apache.lucene.search.Scorer; import org.elasticsearch.painless.api.Augmentation; -import org.elasticsearch.search.lookup.LeafDocLookup; +import org.elasticsearch.script.ScriptException; import org.objectweb.asm.Handle; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; @@ -34,6 +33,7 @@ import java.lang.invoke.MethodType; import java.util.BitSet; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.Map; import java.util.Objects; @@ -47,22 +47,30 @@ public final class WriterConstants { public static final int CLASS_VERSION = Opcodes.V1_8; public static final int ASM_VERSION = Opcodes.ASM5; - public static final String BASE_CLASS_NAME = Executable.class.getName(); - public static final Type BASE_CLASS_TYPE = Type.getType(Executable.class); + public static final String BASE_CLASS_NAME = PainlessScript.class.getName(); + public static final Type BASE_CLASS_TYPE = Type.getType(PainlessScript.class); + public static final Method CONVERT_TO_SCRIPT_EXCEPTION_METHOD = getAsmMethod(ScriptException.class, "convertToScriptException", + Throwable.class, Map.class); public static final String CLASS_NAME = BASE_CLASS_NAME + "$Script"; public static final Type CLASS_TYPE = Type.getObjectType(CLASS_NAME.replace('.', '/')); public static final 
Method CONSTRUCTOR = getAsmMethod(void.class, "", String.class, String.class, BitSet.class); public static final Method CLINIT = getAsmMethod(void.class, ""); - public static final Method EXECUTE = - getAsmMethod(Object.class, "execute", Map.class, Scorer.class, LeafDocLookup.class, Object.class); - public static final Type PAINLESS_ERROR_TYPE = Type.getType(PainlessError.class); + // All of these types are caught by the main method and rethrown as ScriptException + public static final Type PAINLESS_ERROR_TYPE = Type.getType(PainlessError.class); + public static final Type BOOTSTRAP_METHOD_ERROR_TYPE = Type.getType(BootstrapMethodError.class); + public static final Type OUT_OF_MEMORY_ERROR_TYPE = Type.getType(OutOfMemoryError.class); + public static final Type STACK_OVERFLOW_ERROR_TYPE = Type.getType(StackOverflowError.class); + public static final Type EXCEPTION_TYPE = Type.getType(Exception.class); + public static final Type PAINLESS_EXPLAIN_ERROR_TYPE = Type.getType(PainlessExplainError.class); + public static final Method PAINLESS_EXPLAIN_ERROR_GET_HEADERS_METHOD = getAsmMethod(Map.class, "getHeaders"); - public static final Type NEEDS_SCORE_TYPE = Type.getType(NeedsScore.class); - public static final Type SCORER_TYPE = Type.getType(Scorer.class); - public static final Method SCORER_SCORE = getAsmMethod(float.class, "score"); + public static final Type COLLECTIONS_TYPE = Type.getType(Collections.class); + public static final Method EMPTY_MAP_METHOD = getAsmMethod(Map.class, "emptyMap"); + + public static final MethodType USES_PARAMETER_METHOD_TYPE = MethodType.methodType(boolean.class); public static final Type MAP_TYPE = Type.getType(Map.class); public static final Method MAP_GET = getAsmMethod(Object.class, "get", Object.class); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 9de833e541917..19d15a4beb36c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -31,6 +31,7 @@ import org.elasticsearch.painless.CompilerSettings; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.ScriptInterface; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.antlr.PainlessParser.AfterthoughtContext; import org.elasticsearch.painless.antlr.PainlessParser.ArgumentContext; @@ -172,10 +173,12 @@ */ public final class Walker extends PainlessParserBaseVisitor { - public static SSource buildPainlessTree(String sourceName, String sourceText, CompilerSettings settings, Printer debugStream) { - return new Walker(sourceName, sourceText, settings, debugStream).source; + public static SSource buildPainlessTree(ScriptInterface mainMethod, String sourceName, String sourceText, CompilerSettings settings, + Printer debugStream) { + return new Walker(mainMethod, sourceName, sourceText, settings, debugStream).source; } + private final ScriptInterface scriptInterface; private final SSource source; private final CompilerSettings settings; private final Printer debugStream; @@ -186,7 +189,8 @@ public static SSource buildPainlessTree(String sourceName, String sourceText, Co private final Globals globals; private int syntheticCounter = 0; - private Walker(String sourceName, String sourceText, CompilerSettings settings, Printer debugStream) { + private 
Walker(ScriptInterface scriptInterface, String sourceName, String sourceText, CompilerSettings settings, Printer debugStream) { + this.scriptInterface = scriptInterface; this.debugStream = debugStream; this.settings = settings; this.sourceName = Location.computeSourceName(sourceName, sourceText); @@ -256,7 +260,7 @@ public ANode visitSource(SourceContext ctx) { statements.add((AStatement)visit(statement)); } - return new SSource(settings, sourceName, sourceText, debugStream, (MainMethodReserved)reserved.pop(), + return new SSource(scriptInterface, settings, sourceName, sourceText, debugStream, (MainMethodReserved)reserved.pop(), location(ctx), functions, globals, statements); } @@ -850,7 +854,7 @@ public ANode visitMapinit(MapinitContext ctx) { @Override public ANode visitVariable(VariableContext ctx) { String name = ctx.ID().getText(); - reserved.peek().markReserved(name); + reserved.peek().markUsedVariable(name); return new EVariable(location(ctx), name); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java index c1d90c942ac8c..760b0d15d83ee 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java @@ -56,14 +56,11 @@ public final class SFunction extends AStatement { public static final class FunctionReserved implements Reserved { private int maxLoopCounter = 0; - public void markReserved(String name) { + @Override + public void markUsedVariable(String name) { // Do nothing. } - public boolean isReserved(String name) { - return Locals.FUNCTION_KEYWORDS.contains(name); - } - @Override public void setMaxLoopCounter(int max) { maxLoopCounter = max; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index dd4f56bb6a1d5..56e08b4ddf152 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -23,17 +23,19 @@ import org.elasticsearch.painless.Constant; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.MethodKey; -import org.elasticsearch.painless.Executable; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptInterface; import org.elasticsearch.painless.SimpleChecksAdapter; import org.elasticsearch.painless.WriterConstants; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; +import org.objectweb.asm.Type; import org.objectweb.asm.util.Printer; import org.objectweb.asm.util.TraceClassVisitor; @@ -42,18 +44,27 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.painless.WriterConstants.BASE_CLASS_TYPE; +import static 
org.elasticsearch.painless.WriterConstants.BOOTSTRAP_METHOD_ERROR_TYPE; import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE; +import static org.elasticsearch.painless.WriterConstants.COLLECTIONS_TYPE; import static org.elasticsearch.painless.WriterConstants.CONSTRUCTOR; -import static org.elasticsearch.painless.WriterConstants.EXECUTE; -import static org.elasticsearch.painless.WriterConstants.MAP_GET; -import static org.elasticsearch.painless.WriterConstants.MAP_TYPE; +import static org.elasticsearch.painless.WriterConstants.CONVERT_TO_SCRIPT_EXCEPTION_METHOD; +import static org.elasticsearch.painless.WriterConstants.EMPTY_MAP_METHOD; +import static org.elasticsearch.painless.WriterConstants.EXCEPTION_TYPE; +import static org.elasticsearch.painless.WriterConstants.OUT_OF_MEMORY_ERROR_TYPE; +import static org.elasticsearch.painless.WriterConstants.PAINLESS_ERROR_TYPE; +import static org.elasticsearch.painless.WriterConstants.PAINLESS_EXPLAIN_ERROR_GET_HEADERS_METHOD; +import static org.elasticsearch.painless.WriterConstants.PAINLESS_EXPLAIN_ERROR_TYPE; +import static org.elasticsearch.painless.WriterConstants.STACK_OVERFLOW_ERROR_TYPE; /** * The root of all Painless trees. Contains a series of statements. @@ -61,44 +72,25 @@ public final class SSource extends AStatement { /** - * Tracks reserved variables. Must be given to any source of input + * Tracks derived arguments and the loop counter. Must be given to any source of input * prior to beginning the analysis phase so that reserved variables * are known ahead of time to assign appropriate slots without * being wasteful. */ public interface Reserved { - void markReserved(String name); - boolean isReserved(String name); + void markUsedVariable(String name); void setMaxLoopCounter(int max); int getMaxLoopCounter(); } public static final class MainMethodReserved implements Reserved { - private boolean score = false; - private boolean ctx = false; + private final Set usedVariables = new HashSet<>(); private int maxLoopCounter = 0; @Override - public void markReserved(String name) { - if (Locals.SCORE.equals(name)) { - score = true; - } else if (Locals.CTX.equals(name)) { - ctx = true; - } - } - - @Override - public boolean isReserved(String name) { - return Locals.MAIN_KEYWORDS.contains(name); - } - - public boolean usesScore() { - return score; - } - - public boolean usesCtx() { - return ctx; + public void markUsedVariable(String name) { + usedVariables.add(name); } @Override @@ -110,8 +102,13 @@ public void setMaxLoopCounter(int max) { public int getMaxLoopCounter() { return maxLoopCounter; } + + public Set getUsedVariables() { + return unmodifiableSet(usedVariables); + } } + private final ScriptInterface scriptInterface; private final CompilerSettings settings; private final String name; private final String source; @@ -124,10 +121,11 @@ public int getMaxLoopCounter() { private Locals mainMethod; private byte[] bytes; - public SSource(CompilerSettings settings, String name, String source, Printer debugStream, + public SSource(ScriptInterface scriptInterface, CompilerSettings settings, String name, String source, Printer debugStream, MainMethodReserved reserved, Location location, List functions, Globals globals, List statements) { super(location); + this.scriptInterface = Objects.requireNonNull(scriptInterface); this.settings = Objects.requireNonNull(settings); this.name = Objects.requireNonNull(name); this.source = Objects.requireNonNull(source); @@ -175,7 +173,7 @@ void analyze(Locals program) { throw createError(new 
IllegalArgumentException("Cannot generate an empty script.")); } - mainMethod = Locals.newMainMethodScope(program, reserved.usesScore(), reserved.usesCtx(), reserved.getMaxLoopCounter()); + mainMethod = Locals.newMainMethodScope(scriptInterface, program, reserved.getMaxLoopCounter()); AStatement last = statements.get(statements.size() - 1); @@ -202,7 +200,7 @@ public void write() { int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; String classBase = BASE_CLASS_TYPE.getInternalName(); String className = CLASS_TYPE.getInternalName(); - String classInterfaces[] = reserved.usesScore() ? new String[] { WriterConstants.NEEDS_SCORE_TYPE.getInternalName() } : null; + String classInterfaces[] = new String[] { Type.getType(scriptInterface.getInterface()).getInternalName() }; ClassWriter writer = new ClassWriter(classFrames); ClassVisitor visitor = writer; @@ -223,15 +221,16 @@ public void write() { constructor.visitCode(); constructor.loadThis(); constructor.loadArgs(); - constructor.invokeConstructor(org.objectweb.asm.Type.getType(Executable.class), CONSTRUCTOR); + constructor.invokeConstructor(BASE_CLASS_TYPE, CONSTRUCTOR); constructor.returnValue(); constructor.endMethod(); - // Write the execute method: - MethodWriter execute = new MethodWriter(Opcodes.ACC_PUBLIC, EXECUTE, visitor, globals.getStatements(), settings); - execute.visitCode(); - write(execute, globals); - execute.endMethod(); + // Write the method defined in the interface: + MethodWriter executeMethod = new MethodWriter(Opcodes.ACC_PUBLIC, scriptInterface.getExecuteMethod(), visitor, + globals.getStatements(), settings); + executeMethod.visitCode(); + write(executeMethod, globals); + executeMethod.endMethod(); // Write all functions: for (SFunction function : functions) { @@ -273,6 +272,15 @@ public void write() { clinit.endMethod(); } + // Write any uses$varName methods for used variables + for (org.objectweb.asm.commons.Method usesMethod : scriptInterface.getUsesMethods()) { + MethodWriter ifaceMethod = new MethodWriter(Opcodes.ACC_PUBLIC, usesMethod, visitor, globals.getStatements(), settings); + ifaceMethod.visitCode(); + ifaceMethod.push(reserved.getUsedVariables().contains(usesMethod.getName().substring("uses$".length()))); + ifaceMethod.returnValue(); + ifaceMethod.endMethod(); + } + // End writing the class and store the generated bytes. 
visitor.visitEnd(); @@ -281,30 +289,13 @@ public void write() { @Override void write(MethodWriter writer, Globals globals) { - if (reserved.usesScore()) { - // if the _score value is used, we do this once: - // final double _score = scorer.score(); - Variable scorer = mainMethod.getVariable(null, Locals.SCORER); - Variable score = mainMethod.getVariable(null, Locals.SCORE); - - writer.visitVarInsn(Opcodes.ALOAD, scorer.getSlot()); - writer.invokeVirtual(WriterConstants.SCORER_TYPE, WriterConstants.SCORER_SCORE); - writer.visitInsn(Opcodes.F2D); - writer.visitVarInsn(Opcodes.DSTORE, score.getSlot()); - } - - if (reserved.usesCtx()) { - // if the _ctx value is used, we do this once: - // final Map ctx = input.get("ctx"); - - Variable input = mainMethod.getVariable(null, Locals.PARAMS); - Variable ctx = mainMethod.getVariable(null, Locals.CTX); - - writer.visitVarInsn(Opcodes.ALOAD, input.getSlot()); - writer.push(Locals.CTX); - writer.invokeInterface(MAP_TYPE, MAP_GET); - writer.visitVarInsn(Opcodes.ASTORE, ctx.getSlot()); - } + // We wrap the whole method in a few try/catches to handle and/or convert other exceptions to ScriptException + Label startTry = new Label(); + Label endTry = new Label(); + Label startExplainCatch = new Label(); + Label startOtherCatch = new Label(); + Label endCatch = new Label(); + writer.mark(startTry); if (reserved.getMaxLoopCounter() > 0) { // if there is infinite loop protection, we do this once: @@ -321,9 +312,51 @@ void write(MethodWriter writer, Globals globals) { } if (!methodEscape) { - writer.visitInsn(Opcodes.ACONST_NULL); + switch (scriptInterface.getExecuteMethod().getReturnType().getSort()) { + case org.objectweb.asm.Type.VOID: break; + case org.objectweb.asm.Type.BOOLEAN: writer.push(false); break; + case org.objectweb.asm.Type.BYTE: writer.push(0); break; + case org.objectweb.asm.Type.SHORT: writer.push(0); break; + case org.objectweb.asm.Type.INT: writer.push(0); break; + case org.objectweb.asm.Type.LONG: writer.push(0L); break; + case org.objectweb.asm.Type.FLOAT: writer.push(0f); break; + case org.objectweb.asm.Type.DOUBLE: writer.push(0d); break; + default: writer.visitInsn(Opcodes.ACONST_NULL); + } writer.returnValue(); } + + writer.mark(endTry); + writer.goTo(endCatch); + // This looks like: + // } catch (PainlessExplainError e) { + // throw this.convertToScriptException(e, e.getHeaders()) + // } + writer.visitTryCatchBlock(startTry, endTry, startExplainCatch, PAINLESS_EXPLAIN_ERROR_TYPE.getInternalName()); + writer.mark(startExplainCatch); + writer.loadThis(); + writer.swap(); + writer.dup(); + writer.invokeVirtual(PAINLESS_EXPLAIN_ERROR_TYPE, PAINLESS_EXPLAIN_ERROR_GET_HEADERS_METHOD); + writer.invokeVirtual(BASE_CLASS_TYPE, CONVERT_TO_SCRIPT_EXCEPTION_METHOD); + writer.throwException(); + // This looks like: + // } catch (PainlessError | BootstrapMethodError | OutOfMemoryError | StackOverflowError | Exception e) { + // throw this.convertToScriptException(e, e.getHeaders()) + // } + // We *think* it is ok to catch OutOfMemoryError and StackOverflowError because Painless is stateless + writer.visitTryCatchBlock(startTry, endTry, startOtherCatch, PAINLESS_ERROR_TYPE.getInternalName()); + writer.visitTryCatchBlock(startTry, endTry, startOtherCatch, BOOTSTRAP_METHOD_ERROR_TYPE.getInternalName()); + writer.visitTryCatchBlock(startTry, endTry, startOtherCatch, OUT_OF_MEMORY_ERROR_TYPE.getInternalName()); + writer.visitTryCatchBlock(startTry, endTry, startOtherCatch, STACK_OVERFLOW_ERROR_TYPE.getInternalName()); + 
writer.visitTryCatchBlock(startTry, endTry, startOtherCatch, EXCEPTION_TYPE.getInternalName()); + writer.mark(startOtherCatch); + writer.loadThis(); + writer.swap(); + writer.invokeStatic(COLLECTIONS_TYPE, EMPTY_MAP_METHOD); + writer.invokeVirtual(BASE_CLASS_TYPE, CONVERT_TO_SCRIPT_EXCEPTION_METHOD); + writer.throwException(); + writer.mark(endCatch); } public BitSet getStatements() { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java index 0551965f067a5..0ca72f993e520 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/CastTests.java @@ -21,7 +21,7 @@ /** Tests for explicit casts */ public class CastTests extends ScriptTestCase { - + /** * Unary operator with explicit cast */ @@ -34,7 +34,7 @@ public void testUnaryOperator() { assertEquals(5L, exec("long x = 5L; return (long) (+x);")); assertEquals(5D, exec("long x = 5L; return (double) (+x);")); } - + /** * Binary operators with explicit cast */ @@ -73,7 +73,7 @@ public void testBinaryPrefix() { assertEquals(6L, exec("long x = 5L; return (long) (++x);")); assertEquals(6D, exec("long x = 5L; return (double) (++x);")); } - + /** * Binary compound postifx with explicit cast */ @@ -86,7 +86,7 @@ public void testBinaryPostfix() { assertEquals(5L, exec("long x = 5L; return (long) (x++);")); assertEquals(5D, exec("long x = 5L; return (double) (x++);")); } - + /** * Shift operators with explicit cast */ @@ -99,7 +99,7 @@ public void testShiftOperator() { assertEquals(10L, exec("long x = 5L; return (long) (x << 1);")); assertEquals(10D, exec("long x = 5L; return (double) (x << 1);")); } - + /** * Shift compound assignment with explicit cast */ @@ -112,7 +112,7 @@ public void testShiftCompoundAssignment() { assertEquals(10L, exec("long x = 5L; return (long) (x <<= 1);")); assertEquals(10D, exec("long x = 5L; return (double) (x <<= 1);")); } - + /** * Test that without a cast, we fail when conversions would narrow. */ @@ -136,7 +136,7 @@ public void testIllegalConversions() { exec("long x = 5L; boolean y = (x + x); return y"); }); } - + /** * Test that even with a cast, some things aren't allowed. */ @@ -161,7 +161,7 @@ public void testIllegalExplicitConversions() { public void testMethodCallDef() { assertEquals(5, exec("def x = 5; return (int)x.longValue();")); } - + /** * Currently these do not adopt the argument value, we issue a separate cast! 
*/ @@ -170,7 +170,7 @@ public void testArgumentsDef() { assertEquals(6, exec("def x = 5; def y = 1L; return x + (int)y")); assertEquals('b', exec("def x = 'abcdeg'; def y = 1L; x.charAt((int)y)")); } - + /** * Unary operators adopt the return value */ @@ -183,7 +183,7 @@ public void testUnaryOperatorDef() { assertEquals(5L, exec("def x = 5L; return (long) (+x);")); assertEquals(5D, exec("def x = 5L; return (double) (+x);")); } - + /** * Binary operators adopt the return value */ @@ -196,7 +196,7 @@ public void testBinaryOperatorDef() { assertEquals(6L, exec("def x = 5L; return (long) (x + 1);")); assertEquals(6D, exec("def x = 5L; return (double) (x + 1);")); } - + /** * Binary operators don't yet adopt the return value with compound assignment */ @@ -209,7 +209,7 @@ public void testBinaryCompoundAssignmentDef() { assertEquals(6L, exec("def x = 5L; return (long) (x += 1);")); assertEquals(6D, exec("def x = 5L; return (double) (x += 1);")); } - + /** * Binary operators don't yet adopt the return value with compound assignment */ @@ -222,7 +222,7 @@ public void testBinaryCompoundAssignmentPrefix() { assertEquals(6L, exec("def x = 5L; return (long) (++x);")); assertEquals(6D, exec("def x = 5L; return (double) (++x);")); } - + /** * Binary operators don't yet adopt the return value with compound assignment */ @@ -235,7 +235,7 @@ public void testBinaryCompoundAssignmentPostfix() { assertEquals(5L, exec("def x = 5L; return (long) (x++);")); assertEquals(5D, exec("def x = 5L; return (double) (x++);")); } - + /** * Shift operators adopt the return value */ @@ -248,7 +248,7 @@ public void testShiftOperatorDef() { assertEquals(10L, exec("def x = 5L; return (long) (x << 1);")); assertEquals(10D, exec("def x = 5L; return (double) (x << 1);")); } - + /** * Shift operators don't yet adopt the return value with compound assignment */ @@ -261,7 +261,7 @@ public void testShiftCompoundAssignmentDef() { assertEquals(10L, exec("def x = 5L; return (long) (x <<= 1);")); assertEquals(10D, exec("def x = 5L; return (double) (x <<= 1);")); } - + /** * Test that without a cast, we fail when conversions would narrow. */ @@ -285,7 +285,21 @@ public void testIllegalConversionsDef() { exec("def x = 5L; boolean y = (x + x); return y"); }); } - + + public void testUnboxMethodParameters() { + assertEquals('a', exec("'a'.charAt(Integer.valueOf(0))")); + } + + public void testIllegalCastInMethodArgument() { + assertEquals('a', exec("'a'.charAt(0)")); + Exception e = expectScriptThrows(ClassCastException.class, () -> exec("'a'.charAt(0L)")); + assertEquals("Cannot cast from [long] to [int].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> exec("'a'.charAt(0.0f)")); + assertEquals("Cannot cast from [float] to [int].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> exec("'a'.charAt(0.0d)")); + assertEquals("Cannot cast from [double] to [int].", e.getMessage()); + } + /** * Test that even with a cast, some things aren't allowed. 
* (stuff that methodhandles explicitCastArguments would otherwise allow) diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index d8e6896f083c2..9d06281b4267a 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -30,16 +30,16 @@ final class Debugger { /** compiles source to bytecode, and returns debugging output */ static String toString(final String source) { - return toString(source, new CompilerSettings()); + return toString(GenericElasticsearchScript.class, source, new CompilerSettings()); } /** compiles to bytecode, and returns debugging output */ - static String toString(String source, CompilerSettings settings) { + static String toString(Class iface, String source, CompilerSettings settings) { StringWriter output = new StringWriter(); PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - Compiler.compile("", source, settings, textifier); + Compiler.compile(iface, "", source, settings, textifier); } catch (Exception e) { textifier.print(outputWriter); e.addSuppressed(new Exception("current bytecode: \n" + output)); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java index 68bac55db781e..8f9505d09c96c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionTests.java @@ -54,6 +54,18 @@ public void testEmpty() { assertThat(expected.getMessage(), containsString("Cannot generate an empty function")); } + public void testReturnsAreUnboxedIfNeeded() { + assertEquals((byte) 5, exec("byte get() {Byte.valueOf(5)} get()")); + assertEquals((short) 5, exec("short get() {Byte.valueOf(5)} get()")); + assertEquals(5, exec("int get() {Byte.valueOf(5)} get()")); + assertEquals((short) 5, exec("short get() {Short.valueOf(5)} get()")); + assertEquals(5, exec("int get() {Integer.valueOf(5)} get()")); + assertEquals(5.0f, exec("float get() {Float.valueOf(5)} get()")); + assertEquals(5.0d, exec("double get() {Float.valueOf(5)} get()")); + assertEquals(5.0d, exec("double get() {Double.valueOf(5)} get()")); + assertEquals(true, exec("boolean get() {Boolean.TRUE} get()")); + } + public void testDuplicates() { Exception expected = expectScriptThrows(IllegalArgumentException.class, () -> { exec("void test(int x) {x = 2;} void test(def y) {y = 3;} test()"); @@ -61,6 +73,15 @@ public void testDuplicates() { assertThat(expected.getMessage(), containsString("Duplicate functions")); } + public void testBadCastFromMethod() { + Exception e = expectScriptThrows(ClassCastException.class, () -> exec("int get() {5L} get()")); + assertEquals("Cannot cast from [long] to [int].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> exec("int get() {5.1f} get()")); + assertEquals("Cannot cast from [float] to [int].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> exec("int get() {5.1d} get()")); + assertEquals("Cannot cast from [double] to [int].", e.getMessage()); + } + public void testInfiniteLoop() { Error expected = expectScriptThrows(PainlessError.class, () -> { exec("void test() {boolean x = true; while (x) {}} test()"); diff --git 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java new file mode 100644 index 0000000000000..2455af32528fc --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ImplementInterfacesTests.java @@ -0,0 +1,445 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; + +/** + * Tests for Painless implementing different interfaces. + */ +public class ImplementInterfacesTests extends ScriptTestCase { + public interface NoArgs { + String[] ARGUMENTS = new String[] {}; + Object execute(); + } + public void testNoArgs() { + assertEquals(1, scriptEngine.compile(NoArgs.class, null, "1", emptyMap()).execute()); + assertEquals("foo", scriptEngine.compile(NoArgs.class, null, "'foo'", emptyMap()).execute()); + + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(NoArgs.class, null, "doc", emptyMap())); + assertEquals("Variable [doc] is not defined.", e.getMessage()); + // _score was once embedded into painless by deep magic + e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(NoArgs.class, null, "_score", emptyMap())); + assertEquals("Variable [_score] is not defined.", e.getMessage()); + + String debug = Debugger.toString(NoArgs.class, "int i = 0", new CompilerSettings()); + /* Elasticsearch requires that scripts that return nothing return null. We hack that together by returning null from scripts that + * return Object if they don't return anything. 
*/ + assertThat(debug, containsString("ACONST_NULL")); + assertThat(debug, containsString("ARETURN")); + } + + public interface OneArg { + String[] ARGUMENTS = new String[] {"arg"}; + Object execute(Object arg); + } + public void testOneArg() { + Object rando = randomInt(); + assertEquals(rando, scriptEngine.compile(OneArg.class, null, "arg", emptyMap()).execute(rando)); + rando = randomAsciiOfLength(5); + assertEquals(rando, scriptEngine.compile(OneArg.class, null, "arg", emptyMap()).execute(rando)); + + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(NoArgs.class, null, "doc", emptyMap())); + assertEquals("Variable [doc] is not defined.", e.getMessage()); + // _score was once embedded into painless by deep magic + e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(NoArgs.class, null, "_score", emptyMap())); + assertEquals("Variable [_score] is not defined.", e.getMessage()); + } + + public interface ArrayArg { + String[] ARGUMENTS = new String[] {"arg"}; + Object execute(String[] arg); + } + public void testArrayArg() { + String rando = randomAsciiOfLength(5); + assertEquals(rando, scriptEngine.compile(ArrayArg.class, null, "arg[0]", emptyMap()).execute(new String[] {rando, "foo"})); + } + + public interface PrimitiveArrayArg { + String[] ARGUMENTS = new String[] {"arg"}; + Object execute(int[] arg); + } + public void testPrimitiveArrayArg() { + int rando = randomInt(); + assertEquals(rando, scriptEngine.compile(PrimitiveArrayArg.class, null, "arg[0]", emptyMap()).execute(new int[] {rando, 10})); + } + + public interface DefArrayArg { + String[] ARGUMENTS = new String[] {"arg"}; + Object execute(Object[] arg); + } + public void testDefArrayArg() { + Object rando = randomInt(); + assertEquals(rando, scriptEngine.compile(DefArrayArg.class, null, "arg[0]", emptyMap()).execute(new Object[] {rando, 10})); + rando = randomAsciiOfLength(5); + assertEquals(rando, scriptEngine.compile(DefArrayArg.class, null, "arg[0]", emptyMap()).execute(new Object[] {rando, 10})); + assertEquals(5, scriptEngine.compile(DefArrayArg.class, null, "arg[0].length()", emptyMap()).execute(new Object[] {rando, 10})); + } + + public interface ManyArgs { + String[] ARGUMENTS = new String[] {"a", "b", "c", "d"}; + Object execute(int a, int b, int c, int d); + boolean uses$a(); + boolean uses$b(); + boolean uses$c(); + boolean uses$d(); + } + public void testManyArgs() { + int rando = randomInt(); + assertEquals(rando, scriptEngine.compile(ManyArgs.class, null, "a", emptyMap()).execute(rando, 0, 0, 0)); + assertEquals(10, scriptEngine.compile(ManyArgs.class, null, "a + b + c + d", emptyMap()).execute(1, 2, 3, 4)); + + // While we're here we can verify that painless correctly finds used variables + ManyArgs script = scriptEngine.compile(ManyArgs.class, null, "a", emptyMap()); + assertTrue(script.uses$a()); + assertFalse(script.uses$b()); + assertFalse(script.uses$c()); + assertFalse(script.uses$d()); + script = scriptEngine.compile(ManyArgs.class, null, "a + b + c", emptyMap()); + assertTrue(script.uses$a()); + assertTrue(script.uses$b()); + assertTrue(script.uses$c()); + assertFalse(script.uses$d()); + script = scriptEngine.compile(ManyArgs.class, null, "a + b + c + d", emptyMap()); + assertTrue(script.uses$a()); + assertTrue(script.uses$b()); + assertTrue(script.uses$c()); + assertTrue(script.uses$d()); + } + + public interface VarargTest { + String[] ARGUMENTS = new String[] {"arg"}; + Object execute(String... 
arg); + } + public void testVararg() { + assertEquals("foo bar baz", scriptEngine.compile(VarargTest.class, null, "String.join(' ', Arrays.asList(arg))", emptyMap()) + .execute("foo", "bar", "baz")); + } + + public interface DefaultMethods { + String[] ARGUMENTS = new String[] {"a", "b", "c", "d"}; + Object execute(int a, int b, int c, int d); + default Object executeWithOne() { + return execute(1, 1, 1, 1); + } + default Object executeWithASingleOne(int a, int b, int c) { + return execute(a, b, c, 1); + } + } + public void testDefaultMethods() { + int rando = randomInt(); + assertEquals(rando, scriptEngine.compile(DefaultMethods.class, null, "a", emptyMap()).execute(rando, 0, 0, 0)); + assertEquals(rando, scriptEngine.compile(DefaultMethods.class, null, "a", emptyMap()).executeWithASingleOne(rando, 0, 0)); + assertEquals(10, scriptEngine.compile(DefaultMethods.class, null, "a + b + c + d", emptyMap()).execute(1, 2, 3, 4)); + assertEquals(4, scriptEngine.compile(DefaultMethods.class, null, "a + b + c + d", emptyMap()).executeWithOne()); + assertEquals(7, scriptEngine.compile(DefaultMethods.class, null, "a + b + c + d", emptyMap()).executeWithASingleOne(1, 2, 3)); + } + + public interface ReturnsVoid { + String[] ARGUMENTS = new String[] {"map"}; + void execute(Map map); + } + public void testReturnsVoid() { + Map map = new HashMap<>(); + scriptEngine.compile(ReturnsVoid.class, null, "map.a = 'foo'", emptyMap()).execute(map); + assertEquals(singletonMap("a", "foo"), map); + scriptEngine.compile(ReturnsVoid.class, null, "map.remove('a')", emptyMap()).execute(map); + assertEquals(emptyMap(), map); + + String debug = Debugger.toString(ReturnsVoid.class, "int i = 0", new CompilerSettings()); + // The important thing is that this contains the opcode for returning void + assertThat(debug, containsString(" RETURN")); + // We shouldn't contain any weird "default to null" logic + assertThat(debug, not(containsString("ACONST_NULL"))); + } + + public interface ReturnsPrimitiveBoolean { + String[] ARGUMENTS = new String[] {}; + boolean execute(); + } + public void testReturnsPrimitiveBoolean() { + assertEquals(true, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "true", emptyMap()).execute()); + assertEquals(false, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "false", emptyMap()).execute()); + assertEquals(true, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "Boolean.TRUE", emptyMap()).execute()); + assertEquals(false, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "Boolean.FALSE", emptyMap()).execute()); + + assertEquals(true, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "def i = true; i", emptyMap()).execute()); + assertEquals(true, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "def i = Boolean.TRUE; i", emptyMap()).execute()); + + assertEquals(true, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "true || false", emptyMap()).execute()); + + String debug = Debugger.toString(ReturnsPrimitiveBoolean.class, "false", new CompilerSettings()); + assertThat(debug, containsString("ICONST_0")); + // The important thing here is that we have the bytecode for returning an integer instead of an object. booleans are integers. 
+ assertThat(debug, containsString("IRETURN")); + + Exception e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "1L", emptyMap()).execute()); + assertEquals("Cannot cast from [long] to [boolean].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "1.1f", emptyMap()).execute()); + assertEquals("Cannot cast from [float] to [boolean].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "1.1d", emptyMap()).execute()); + assertEquals("Cannot cast from [double] to [boolean].", e.getMessage()); + expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "def i = 1L; i", emptyMap()).execute()); + expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "def i = 1.1f; i", emptyMap()).execute()); + expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "def i = 1.1d; i", emptyMap()).execute()); + + assertEquals(false, scriptEngine.compile(ReturnsPrimitiveBoolean.class, null, "int i = 0", emptyMap()).execute()); + } + + public interface ReturnsPrimitiveInt { + String[] ARGUMENTS = new String[] {}; + int execute(); + } + public void testReturnsPrimitiveInt() { + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "1", emptyMap()).execute()); + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "(int) 1L", emptyMap()).execute()); + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "(int) 1.1d", emptyMap()).execute()); + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "(int) 1.1f", emptyMap()).execute()); + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "Integer.valueOf(1)", emptyMap()).execute()); + + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "def i = 1; i", emptyMap()).execute()); + assertEquals(1, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "def i = Integer.valueOf(1); i", emptyMap()).execute()); + + assertEquals(2, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "1 + 1", emptyMap()).execute()); + + String debug = Debugger.toString(ReturnsPrimitiveInt.class, "1", new CompilerSettings()); + assertThat(debug, containsString("ICONST_1")); + // The important thing here is that we have the bytecode for returning an integer instead of an object + assertThat(debug, containsString("IRETURN")); + + Exception e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveInt.class, null, "1L", emptyMap()).execute()); + assertEquals("Cannot cast from [long] to [int].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveInt.class, null, "1.1f", emptyMap()).execute()); + assertEquals("Cannot cast from [float] to [int].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveInt.class, null, "1.1d", emptyMap()).execute()); + assertEquals("Cannot cast from [double] to [int].", e.getMessage()); + expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveInt.class, null, "def i = 1L; i", emptyMap()).execute()); + expectScriptThrows(ClassCastException.class, () -> + 
scriptEngine.compile(ReturnsPrimitiveInt.class, null, "def i = 1.1f; i", emptyMap()).execute()); + expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveInt.class, null, "def i = 1.1d; i", emptyMap()).execute()); + + assertEquals(0, scriptEngine.compile(ReturnsPrimitiveInt.class, null, "int i = 0", emptyMap()).execute()); + } + + public interface ReturnsPrimitiveFloat { + String[] ARGUMENTS = new String[] {}; + float execute(); + } + public void testReturnsPrimitiveFloat() { + assertEquals(1.1f, scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "1.1f", emptyMap()).execute(), 0); + assertEquals(1.1f, scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "(float) 1.1d", emptyMap()).execute(), 0); + assertEquals(1.1f, scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "def d = 1.1f; d", emptyMap()).execute(), 0); + assertEquals(1.1f, + scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "def d = Float.valueOf(1.1f); d", emptyMap()).execute(), 0); + + assertEquals(1.1f + 6.7f, scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "1.1f + 6.7f", emptyMap()).execute(), 0); + + Exception e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "1.1d", emptyMap()).execute()); + assertEquals("Cannot cast from [double] to [float].", e.getMessage()); + e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "def d = 1.1d; d", emptyMap()).execute()); + e = expectScriptThrows(ClassCastException.class, () -> + scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "def d = Double.valueOf(1.1); d", emptyMap()).execute()); + + String debug = Debugger.toString(ReturnsPrimitiveFloat.class, "1f", new CompilerSettings()); + assertThat(debug, containsString("FCONST_1")); + // The important thing here is that we have the bytecode for returning a float instead of an object + assertThat(debug, containsString("FRETURN")); + + assertEquals(0.0f, scriptEngine.compile(ReturnsPrimitiveFloat.class, null, "int i = 0", emptyMap()).execute(), 0); + } + + public interface ReturnsPrimitiveDouble { + String[] ARGUMENTS = new String[] {}; + double execute(); + } + public void testReturnsPrimitiveDouble() { + assertEquals(1.0, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "1", emptyMap()).execute(), 0); + assertEquals(1.0, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "1L", emptyMap()).execute(), 0); + assertEquals(1.1, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "1.1d", emptyMap()).execute(), 0); + assertEquals((double) 1.1f, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "1.1f", emptyMap()).execute(), 0); + assertEquals(1.1, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "Double.valueOf(1.1)", emptyMap()).execute(), 0); + assertEquals((double) 1.1f, + scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "Float.valueOf(1.1f)", emptyMap()).execute(), 0); + + assertEquals(1.0, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "def d = 1; d", emptyMap()).execute(), 0); + assertEquals(1.0, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "def d = 1L; d", emptyMap()).execute(), 0); + assertEquals(1.1, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "def d = 1.1d; d", emptyMap()).execute(), 0); + assertEquals((double) 1.1f, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "def d = 1.1f; d", emptyMap()).execute(), 0); + assertEquals(1.1, + 
scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "def d = Double.valueOf(1.1); d", emptyMap()).execute(), 0); + assertEquals((double) 1.1f, + scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "def d = Float.valueOf(1.1f); d", emptyMap()).execute(), 0); + + assertEquals(1.1 + 6.7, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "1.1 + 6.7", emptyMap()).execute(), 0); + + String debug = Debugger.toString(ReturnsPrimitiveDouble.class, "1", new CompilerSettings()); + assertThat(debug, containsString("DCONST_1")); + // The important thing here is that we have the bytecode for returning a double instead of an object + assertThat(debug, containsString("DRETURN")); + + assertEquals(0.0, scriptEngine.compile(ReturnsPrimitiveDouble.class, null, "int i = 0", emptyMap()).execute(), 0); + } + + public interface NoArgumentsConstant { + Object execute(String foo); + } + public void testNoArgumentsConstant() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(NoArgumentsConstant.class, null, "1", emptyMap())); + assertThat(e.getMessage(), startsWith("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + + "names of the method arguments but [" + NoArgumentsConstant.class.getName() + "] doesn't have one.")); + } + + public interface WrongArgumentsConstant { + boolean[] ARGUMENTS = new boolean[] {false}; + Object execute(String foo); + } + public void testWrongArgumentsConstant() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(WrongArgumentsConstant.class, null, "1", emptyMap())); + assertThat(e.getMessage(), startsWith("Painless needs a constant [String[] ARGUMENTS] on all interfaces it implements with the " + + "names of the method arguments but [" + WrongArgumentsConstant.class.getName() + "] doesn't have one.")); + } + + public interface WrongLengthOfArgumentConstant { + String[] ARGUMENTS = new String[] {"foo", "bar"}; + Object execute(String foo); + } + public void testWrongLengthOfArgumentConstant() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(WrongLengthOfArgumentConstant.class, null, "1", emptyMap())); + assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgumentConstant.class.getName() + "#ARGUMENTS] has length [2] but [" + + WrongLengthOfArgumentConstant.class.getName() + "#execute] takes [1] argument.")); + } + + public interface UnknownArgType { + String[] ARGUMENTS = new String[] {"foo"}; + Object execute(UnknownArgType foo); + } + public void testUnknownArgType() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(UnknownArgType.class, null, "1", emptyMap())); + assertEquals("[foo] is of unknown type [" + UnknownArgType.class.getName() + ". 
Painless interfaces can only accept arguments " + + "that are of whitelisted types.", e.getMessage()); + } + + public interface UnknownReturnType { + String[] ARGUMENTS = new String[] {"foo"}; + UnknownReturnType execute(String foo); + } + public void testUnknownReturnType() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(UnknownReturnType.class, null, "1", emptyMap())); + assertEquals("Painless can only implement execute methods returning a whitelisted type but [" + UnknownReturnType.class.getName() + + "#execute] returns [" + UnknownReturnType.class.getName() + "] which isn't whitelisted.", e.getMessage()); + } + + public interface UnknownArgTypeInArray { + String[] ARGUMENTS = new String[] {"foo"}; + Object execute(UnknownArgTypeInArray[] foo); + } + public void testUnknownArgTypeInArray() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(UnknownArgTypeInArray.class, null, "1", emptyMap())); + assertEquals("[foo] is of unknown type [" + UnknownArgTypeInArray.class.getName() + ". Painless interfaces can only accept " + + "arguments that are of whitelisted types.", e.getMessage()); + } + + public interface TwoExecuteMethods { + Object execute(); + Object execute(boolean foo); + } + public void testTwoExecuteMethods() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(TwoExecuteMethods.class, null, "null", emptyMap())); + assertEquals("Painless can only implement interfaces that have a single method named [execute] but [" + + TwoExecuteMethods.class.getName() + "] has more than one.", e.getMessage()); + } + + public interface BadMethod { + Object something(); + } + public void testBadMethod() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(BadMethod.class, null, "null", emptyMap())); + assertEquals("Painless can only implement methods named [execute] and [uses$argName] but [" + BadMethod.class.getName() + + "] contains a method named [something]", e.getMessage()); + } + + public interface BadUsesReturn { + String[] ARGUMENTS = new String[] {"foo"}; + Object execute(String foo); + Object uses$foo(); + } + public void testBadUsesReturn() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(BadUsesReturn.class, null, "null", emptyMap())); + assertEquals("Painless can only implement uses$ methods that return boolean but [" + BadUsesReturn.class.getName() + + "#uses$foo] returns [java.lang.Object].", e.getMessage()); + } + + public interface BadUsesParameter { + String[] ARGUMENTS = new String[] {"foo", "bar"}; + Object execute(String foo, String bar); + boolean uses$bar(boolean foo); + } + public void testBadUsesParameter() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(BadUsesParameter.class, null, "null", emptyMap())); + assertEquals("Painless can only implement uses$ methods that do not take parameters but [" + BadUsesParameter.class.getName() + + "#uses$bar] does.", e.getMessage()); + } + + public interface BadUsesName { + String[] ARGUMENTS = new String[] {"foo", "bar"}; + Object execute(String foo, String bar); + boolean uses$baz(); + } + public void testBadUsesName() { + Exception e = expectScriptThrows(IllegalArgumentException.class, () -> + scriptEngine.compile(BadUsesName.class, null, "null", emptyMap())); + assertEquals("Painless can only implement uses$ methods that match a parameter name but [" 
+ BadUsesName.class.getName() + + "#uses$baz] doesn't match any of [foo, bar].", e.getMessage()); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 10149674beaec..544095caf9dc6 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -19,6 +19,8 @@ package org.elasticsearch.painless; +import junit.framework.AssertionFailedError; + import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.settings.Settings; @@ -30,8 +32,6 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import junit.framework.AssertionFailedError; - import java.util.HashMap; import java.util.Map; @@ -76,10 +76,11 @@ public Object exec(String script, Map vars, boolean picky) { public Object exec(String script, Map vars, Map compileParams, Scorer scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { + ScriptInterface scriptInterface = new ScriptInterface(GenericElasticsearchScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings())); - Walker.buildPainlessTree(getTestName(), script, pickySettings, null); + Walker.buildPainlessTree(scriptInterface, getTestName(), script, pickySettings, null); } // test actual script execution Object object = scriptEngine.compile(null, script, compileParams); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 53d564de87d88..a4530823c9ed3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -28,8 +28,10 @@ import org.elasticsearch.painless.Definition.RuntimeClass; import org.elasticsearch.painless.Definition.Struct; import org.elasticsearch.painless.FeatureTest; +import org.elasticsearch.painless.GenericElasticsearchScript; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.ScriptInterface; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.test.ESTestCase; @@ -894,10 +896,11 @@ private void assertToString(String expected, String code) { } private SSource walk(String code) { + ScriptInterface scriptInterface = new ScriptInterface(GenericElasticsearchScript.class); CompilerSettings compilerSettings = new CompilerSettings(); compilerSettings.setRegexesEnabled(true); try { - return Walker.buildPainlessTree(getTestName(), code, compilerSettings, null); + return Walker.buildPainlessTree(scriptInterface, getTestName(), code, compilerSettings, null); } catch (Exception e) { throw new AssertionError("Failed to compile: " + code, e); } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 2f88e54b089db..00c34e8e5e45d 100644 --- 
a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; @@ -44,6 +45,7 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -66,6 +68,7 @@ public final class QueryAnalyzer { map.put(CommonTermsQuery.class, commonTermsQuery()); map.put(BlendedTermQuery.class, blendedTermQuery()); map.put(PhraseQuery.class, phraseQuery()); + map.put(MultiPhraseQuery.class, multiPhraseQuery()); map.put(SpanTermQuery.class, spanTermQuery()); map.put(SpanNearQuery.class, spanNearQuery()); map.put(SpanOrQuery.class, spanOrQuery()); @@ -197,6 +200,21 @@ static Function phraseQuery() { }; } + static Function multiPhraseQuery() { + return query -> { + Term[][] terms = ((MultiPhraseQuery) query).getTermArrays(); + if (terms.length == 0) { + return new Result(true, Collections.emptySet()); + } + + Set bestTermArr = null; + for (Term[] termArr : terms) { + bestTermArr = selectTermListWithTheLongestShortestTerm(bestTermArr, new HashSet<>(Arrays.asList(termArr))); + } + return new Result(false, bestTermArr); + }; + } + static Function spanTermQuery() { return query -> { Term term = ((SpanTermQuery) query).getTerm(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index c00872a6e820f..4a8bec903a7ff 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermInSetQuery; @@ -93,6 +94,21 @@ public void testExtractQueryMetadata_phraseQuery() { assertThat(terms.get(0).bytes(), equalTo(phraseQuery.getTerms()[0].bytes())); } + public void testExtractQueryMetadata_multiPhraseQuery() { + MultiPhraseQuery multiPhraseQuery = new MultiPhraseQuery.Builder() + .add(new Term("_field", "_long_term")) + .add(new Term[] {new Term("_field", "_long_term"), new Term("_field", "_term")}) + .add(new Term[] {new Term("_field", "_long_term"), new Term("_field", "_very_long_term")}) + .add(new Term[] {new Term("_field", "_very_long_term")}) + .build(); + Result result = analyze(multiPhraseQuery); + assertThat(result.verified, is(false)); + List terms = new ArrayList<>(result.terms); + assertThat(terms.size(), equalTo(1)); + assertThat(terms.get(0).field(), equalTo("_field")); + assertThat(terms.get(0).bytes().utf8ToString(), equalTo("_very_long_term")); + } + public void testExtractQueryMetadata_booleanQuery() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); TermQuery termQuery1 = new TermQuery(new 
Term("_field", "_term")); diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 0b222c1a3cd71..eba8b9646177d 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -25,11 +25,9 @@ esplugin { hasClientJar = true } -integTest { - cluster { - // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', '127.0.0.1:*' - } +integTestCluster { + // Whitelist reindexing from the local node so we can test it. + setting 'reindex.remote.whitelist', '127.0.0.1:*' } run { diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index dc769e3c1f300..7008111ca9c54 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -22,8 +22,6 @@ esplugin { classname 'org.elasticsearch.plugin.repository.url.URLRepositoryPlugin' } -integTest { - cluster { - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - } -} \ No newline at end of file +integTestCluster { + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index efcc8d1f2ea1a..a4259b41fd829 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -127,19 +127,20 @@ public void sendResponse(RestResponse response) { if (release) { promise.addListener(f -> ((Releasable)content).close()); - release = false; } if (isCloseConnection()) { promise.addListener(ChannelFutureListener.CLOSE); } + final Object msg; if (pipelinedRequest != null) { - channel.writeAndFlush(pipelinedRequest.createHttpResponse(resp, promise)); + msg = pipelinedRequest.createHttpResponse(resp, promise); } else { - channel.writeAndFlush(resp, promise); + msg = resp; } - + channel.writeAndFlush(msg, promise); + release = false; } finally { if (release) { ((Releasable) content).close(); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 7ea9b6b5eea4a..be1c840c516e5 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -69,7 +69,6 @@ import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler; -import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestUtils; @@ -143,17 +142,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem Setting.byteSizeSetting("http.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, Property.NodeScope, Property.Shared); public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = - Setting.byteSizeSetting("transport.netty.receive_predictor_size", - settings -> { - long defaultReceiverPredictor = 512 * 1024; - if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes() > 0) { - // we can guess a better 
default... - long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes()) / SETTING_HTTP_WORKER_COUNT.get - (settings)); - defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); - } - return new ByteSizeValue(defaultReceiverPredictor).toString(); - }, Property.NodeScope); + Setting.byteSizeSetting("http.netty.receive_predictor_size", new ByteSizeValue(32, ByteSizeUnit.KB), Property.NodeScope); public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty.receive_predictor_min", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java index 792397a3c3dd4..be1669c60c297 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java @@ -20,9 +20,7 @@ package org.elasticsearch.http.netty4.pipelining; import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.LastHttpContent; import io.netty.util.ReferenceCounted; @@ -35,8 +33,7 @@ public class HttpPipelinedRequest implements ReferenceCounted { private final LastHttpContent last; private final int sequence; - - HttpPipelinedRequest(final LastHttpContent last, final int sequence) { + public HttpPipelinedRequest(final LastHttpContent last, final int sequence) { this.last = last; this.sequence = sequence; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java index 21659d5fbdfd1..6b6db94d69a59 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java @@ -21,7 +21,6 @@ import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpResponse; import io.netty.util.ReferenceCounted; class HttpPipelinedResponse implements Comparable, ReferenceCounted { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java index b96b7f5b32276..54cdbd3ba9d47 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java @@ -1,4 +1,3 @@ - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -24,44 +23,42 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http.LastHttpContent; -import io.netty.util.ReferenceCountUtil; -import org.elasticsearch.action.termvectors.TermVectorsFilter; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.transport.netty4.Netty4Utils; import java.util.Collections; import java.util.PriorityQueue; -import java.util.Queue; /** - * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their - * corresponding requests. NOTE: A side effect of using this handler is that upstream HttpRequest objects will - * cause the original message event to be effectively transformed into an OrderedUpstreamMessageEvent. Conversely - * OrderedDownstreamChannelEvent objects are expected to be received for the correlating response objects. + * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. */ public class HttpPipeliningHandler extends ChannelDuplexHandler { - private static final int INITIAL_EVENTS_HELD = 3; + // we use a priority queue so that responses are ordered by their sequence number + private final PriorityQueue holdingQueue; private final int maxEventsHeld; + /* + * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the + * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the + * current write sequence, implying that all preceding messages have been written. + */ private int readSequence; private int writeSequence; - private final Queue holdingQueue; - /** - * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel - * connection. This is required as events cannot queue up indefinitely; we would run out of - * memory if this was the case. + * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation. + * + * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is + * required as events cannot queue up indefinitely */ public HttpPipeliningHandler(final int maxEventsHeld) { this.maxEventsHeld = maxEventsHeld; - this.holdingQueue = new PriorityQueue<>(INITIAL_EVENTS_HELD); + this.holdingQueue = new PriorityQueue<>(1); } @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception { if (msg instanceof LastHttpContent) { ctx.fireChannelRead(new HttpPipelinedRequest(((LastHttpContent) msg).retain(), readSequence++)); } else { @@ -70,21 +67,39 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception } @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws Exception { if (msg instanceof HttpPipelinedResponse) { + final HttpPipelinedResponse current = (HttpPipelinedResponse) msg; + /* + * We attach the promise to the response. 
When we invoke a write on the channel with the response, we must ensure that we invoke + * the write methods that accept the same promise that we have attached to the response otherwise as the response proceeds + * through the handler pipeline a different promise will be used until reaching this handler. Therefore, we assert here that the + * attached promise is identical to the provided promise as a safety mechanism that we are respecting this. + */ + assert current.promise() == promise; + boolean channelShouldClose = false; synchronized (holdingQueue) { if (holdingQueue.size() < maxEventsHeld) { - holdingQueue.add((HttpPipelinedResponse) msg); + holdingQueue.add(current); while (!holdingQueue.isEmpty()) { - final HttpPipelinedResponse response = holdingQueue.peek(); - if (response.sequence() != writeSequence) { + /* + * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence + * number does not match the current write sequence number then we have not processed all preceding responses yet. + */ + final HttpPipelinedResponse top = holdingQueue.peek(); + if (top.sequence() != writeSequence) { break; } holdingQueue.remove(); - ctx.write(response.response(), response.promise()); + /* + * We must use the promise attached to the response; this is necessary since are going to hold a response until all + * responses that precede it in the pipeline are written first. Note that the promise from the method invocation is + * not ignored, it will already be attached to an existing response and consumed when that response is drained. + */ + ctx.write(top.response(), top.promise()); writeSequence++; } } else { @@ -96,7 +111,7 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) try { Netty4Utils.closeChannels(Collections.singletonList(ctx.channel())); } finally { - ((HttpPipelinedResponse) msg).release(); + current.release(); promise.setSuccess(); } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index 59f9447d61c7a..2bea814221297 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -61,6 +61,9 @@ public List> getSettings() { Netty4HttpServerTransport.SETTING_HTTP_TCP_REUSE_ADDRESS, Netty4HttpServerTransport.SETTING_HTTP_TCP_SEND_BUFFER_SIZE, Netty4HttpServerTransport.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE, + Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, + Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN, + Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX, Netty4Transport.WORKER_COUNT, Netty4Transport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY, Netty4Transport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 5ff7823e37212..d5700b8682a7a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -50,6 +50,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -106,24 +107,12 @@ public class Netty4Transport extends TcpTransport { public static final Setting NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, Property.NodeScope, Property.Shared); - // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", - settings -> { - long defaultReceiverPredictor = 512 * 1024; - if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes() > 0) { - // we can guess a better default... - long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().getBytes()) / WORKER_COUNT.get(settings)); - defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); - } - return new ByteSizeValue(defaultReceiverPredictor).toString(); - }, - Property.NodeScope, - Property.Shared); + "transport.netty.receive_predictor_size", new ByteSizeValue(32, ByteSizeUnit.KB), Property.NodeScope); public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = - byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope, Property.Shared); + byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = - byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope, Property.Shared); + byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope, Property.Shared); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index 20844a4007c2e..c075afd463f4d 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -41,14 +41,19 @@ import io.netty.handler.codec.http.HttpVersion; import io.netty.util.Attribute; import io.netty.util.AttributeKey; - import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; +import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.RestResponse; import 
org.elasticsearch.rest.RestStatus; @@ -59,6 +64,7 @@ import org.junit.After; import org.junit.Before; +import java.io.UnsupportedEncodingException; import java.net.SocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -70,6 +76,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -217,6 +224,25 @@ public void testHeadersSet() { } } + public void testReleaseOnSendToClosedChannel() { + final Settings settings = Settings.builder().build(); + final NamedXContentRegistry registry = xContentRegistry(); + try (Netty4HttpServerTransport httpServerTransport = + new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, registry, new NullDispatcher())) { + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); + final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); + final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; + final Netty4HttpChannel channel = + new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, randomBoolean(), threadPool.getThreadContext()); + final TestResponse response = new TestResponse(bigArrays); + assertThat(response.content(), instanceOf(Releasable.class)); + embeddedChannel.close(); + channel.sendResponse(response); + // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released + } + } + public void testConnectionClose() throws Exception { final Settings settings = Settings.builder().build(); try (Netty4HttpServerTransport httpServerTransport = @@ -508,6 +534,24 @@ List getWrittenObjects() { private static class TestResponse extends RestResponse { + private final BytesReference reference; + + TestResponse() { + reference = Netty4Utils.toBytesReference(Unpooled.copiedBuffer("content", StandardCharsets.UTF_8)); + } + + TestResponse(final BigArrays bigArrays) { + final byte[] bytes; + try { + bytes = "content".getBytes("UTF-8"); + } catch (final UnsupportedEncodingException e) { + throw new AssertionError(e); + } + final ByteArray bigArray = bigArrays.newByteArray(bytes.length); + bigArray.set(0, bytes, 0, bytes.length); + reference = new ReleasablePagedBytesReference(bigArrays, bigArray, bytes.length); + } + @Override public String contentType() { return "text"; @@ -515,7 +559,7 @@ public String contentType() { @Override public BytesReference content() { - return Netty4Utils.toBytesReference(Unpooled.copiedBuffer("content", StandardCharsets.UTF_8)); + return reference; } @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index c9ca5068faf0e..d384479b4e5bb 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -24,6 +24,7 @@ 
import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpRequest; @@ -255,11 +256,14 @@ public void run() { assert uri.matches("/\\d+"); } + final ChannelPromise promise = ctx.newPromise(); + final Object msg; if (pipelinedRequest != null) { - ctx.writeAndFlush(pipelinedRequest.createHttpResponse(httpResponse, ctx.channel().newPromise())); + msg = pipelinedRequest.createHttpResponse(httpResponse, promise); } else { - ctx.writeAndFlush(httpResponse); + msg = httpResponse; } + ctx.writeAndFlush(msg, promise); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 38abf92c857d6..9af77e576ffbd 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -109,12 +109,12 @@ public void shutdown() throws Exception { public void testCorsConfig() { final Set methods = new HashSet<>(Arrays.asList("get", "options", "post")); final Set headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length")); - final String suffix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements + final String prefix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements final Settings settings = Settings.builder() .put(SETTING_CORS_ENABLED.getKey(), true) .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*") - .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, ",", suffix, "")) - .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, ",", suffix, "")) + .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, ",", prefix, "")) + .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, ",", prefix, "")) .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) .build(); final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java index 1feb92223a352..ce8e840e246ce 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java @@ -37,7 +37,6 @@ import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.codec.http.QueryStringDecoder; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.test.ESTestCase; import org.junit.After; diff --git a/plugins/discovery-file/build.gradle b/plugins/discovery-file/build.gradle index 3b78f06505e80..c6d622f724809 100644 --- a/plugins/discovery-file/build.gradle +++ b/plugins/discovery-file/build.gradle @@ -39,7 +39,7 @@ task 
setupSeedNodeAndUnicastHostsFile(type: DefaultTask) { // for unicast discovery ClusterConfiguration config = new ClusterConfiguration(project) config.clusterName = 'discovery-file-test-cluster' -List nodes = ClusterFormationTasks.setup(project, setupSeedNodeAndUnicastHostsFile, config) +List nodes = ClusterFormationTasks.setup(project, 'initialCluster', setupSeedNodeAndUnicastHostsFile, config) File srcUnicastHostsFile = file('build/cluster/unicast_hosts.txt') // write the unicast_hosts.txt file to a temporary location to be used by the second cluster @@ -49,11 +49,13 @@ setupSeedNodeAndUnicastHostsFile.doLast { } // second cluster, which will connect to the first via the unicast_hosts.txt file +integTestCluster { + clusterName = 'discovery-file-test-cluster' + extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile +} + +integTestRunner.finalizedBy ':plugins:discovery-file:initialCluster#stop' + integTest { dependsOn(setupSeedNodeAndUnicastHostsFile) - cluster { - clusterName = 'discovery-file-test-cluster' - extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile - } - finalizedBy ':plugins:discovery-file:setupSeedNodeAndUnicastHostsFile#stop' } diff --git a/plugins/ingest-user-agent/build.gradle b/plugins/ingest-user-agent/build.gradle index ec599874d155b..2b2669a40042b 100644 --- a/plugins/ingest-user-agent/build.gradle +++ b/plugins/ingest-user-agent/build.gradle @@ -22,8 +22,6 @@ esplugin { classname 'org.elasticsearch.ingest.useragent.IngestUserAgentPlugin' } -integTest { - cluster { - extraConfigFile 'ingest-user-agent/test-regexes.yaml', 'test/test-regexes.yaml' - } -} \ No newline at end of file +integTestCluster { + extraConfigFile 'ingest-user-agent/test-regexes.yaml', 'test/test-regexes.yaml' +} diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle index c5828d9c86df5..3753d3ae83101 100644 --- a/plugins/jvm-example/build.gradle +++ b/plugins/jvm-example/build.gradle @@ -43,6 +43,8 @@ task exampleFixture(type: org.elasticsearch.gradle.test.Fixture) { integTest { dependsOn exampleFixture +} +integTestRunner { systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 125b7bdd6c5c3..a2c3df17d80a1 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -41,10 +41,8 @@ thirdPartyAudit.excludes = [ 'org.slf4j.LoggerFactory', ] -integTest { - cluster { - setting 'cloud.azure.storage.my_account_test.account', 'cloudazureresource' - setting 'cloud.azure.storage.my_account_test.key', 'abcdefgh' - setting 'script.stored', 'true' - } -} \ No newline at end of file +integTestCluster { + setting 'cloud.azure.storage.my_account_test.account', 'cloudazureresource' + setting 'cloud.azure.storage.my_account_test.key', 'abcdefgh' + setting 'script.stored', 'true' +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index cafcb6b98f044..ce47bd44f0b28 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -25,6 +25,7 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; +import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import 
com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.InstanceProfileCredentialsProvider; @@ -35,6 +36,7 @@ import com.amazonaws.services.s3.S3ClientOptions; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.aws.util.SocketAccess; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -141,7 +143,6 @@ static ClientConfiguration buildConfiguration(Logger logger, Settings repository public static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, Settings settings, Settings repositorySettings, String clientName) { - AWSCredentialsProvider credentials; try (SecureString key = getConfigValue(repositorySettings, settings, clientName, S3Repository.ACCESS_KEY_SETTING, S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); SecureString secret = getConfigValue(repositorySettings, settings, clientName, S3Repository.SECRET_KEY_SETTING, @@ -149,14 +150,23 @@ public static AWSCredentialsProvider buildCredentials(Logger logger, Deprecation if (key.length() == 0 && secret.length() == 0) { logger.debug("Using instance profile credentials"); - credentials = new InstanceProfileCredentialsProvider(); + AWSCredentialsProvider credentials = new InstanceProfileCredentialsProvider(); + return new AWSCredentialsProvider() { + @Override + public AWSCredentials getCredentials() { + return SocketAccess.doPrivileged(credentials::getCredentials); + } + + @Override + public void refresh() { + SocketAccess.doPrivilegedVoid(credentials::refresh); + } + }; } else { logger.debug("Using basic key/secret credentials"); - credentials = new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString())); + return new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString())); } } - - return credentials; } // pkg private for tests diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java index e11dade7953d9..73252102c2fae 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java @@ -23,13 +23,12 @@ import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.InstanceProfileCredentialsProvider; + import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.s3.S3Repository; import org.elasticsearch.test.ESTestCase; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -38,7 +37,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, Settings.EMPTY, Settings.EMPTY, "default"); - assertThat(credentialsProvider, instanceOf(InstanceProfileCredentialsProvider.class)); + assertThat(credentialsProvider, instanceOf(AWSCredentialsProvider.class)); } public void testAwsCredsDefaultSettings() { diff --git a/qa/backwards-5.0/build.gradle 
b/qa/backwards-5.0/build.gradle index e85599229a81d..90dd2f2d8bbf6 100644 --- a/qa/backwards-5.0/build.gradle +++ b/qa/backwards-5.0/build.gradle @@ -35,12 +35,13 @@ apply plugin: 'elasticsearch.rest-test' */ integTest { includePackaged = true - cluster { - numNodes = 4 - numBwcNodes = 2 - bwcVersion = "5.4.0-SNAPSHOT" - setting 'logger.org.elasticsearch', 'DEBUG' - } +} + +integTestCluster { + numNodes = 4 + numBwcNodes = 2 + bwcVersion = "5.4.0-SNAPSHOT" + setting 'logger.org.elasticsearch', 'DEBUG' } repositories { diff --git a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 10b69fb297b18..5e2655ff64c68 100644 --- a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -82,7 +82,7 @@ private void updateIndexSetting(String name, Settings settings) throws IOExcepti new StringEntity(Strings.toString(settings), ContentType.APPLICATION_JSON))); } - protected int indexDocs(String index, final int idStart, final int numDocs) throws IOException { + private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; assertOK(client().performRequest("PUT", index + "/test/" + id, emptyMap(), @@ -91,6 +91,116 @@ protected int indexDocs(String index, final int idStart, final int numDocs) thro return numDocs; } + /** + * Indexes a document in index with docId then concurrently updates the same document + * nUpdates times + * + * @return the document version after updates + */ + private int indexDocWithConcurrentUpdates(String index, final int docId, int nUpdates) throws IOException, InterruptedException { + indexDocs(index, docId, 1); + Thread[] indexThreads = new Thread[nUpdates]; + for (int i = 0; i < nUpdates; i++) { + indexThreads[i] = new Thread(() -> { + try { + indexDocs(index, docId, 1); + } catch (IOException e) { + throw new AssertionError("failed while indexing [" + e.getMessage() + "]"); + } + }); + indexThreads[i].start(); + } + for (Thread indexThread : indexThreads) { + indexThread.join(); + } + return nUpdates + 1; + } + + public void testIndexVersionPropagation() throws Exception { + Nodes nodes = buildNodeAndVersions(); + assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); + logger.info("cluster discovered: {}", nodes.toString()); + final List bwcNamesList = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.toList()); + final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .put("index.routing.allocation.include._name", bwcNames); + final String index = "test"; + final int minUpdates = 5; + final int maxUpdates = 10; + createIndex(index, settings.build()); + try (RestClient newNodeClient = buildClient(restClientSettings(), + nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + + int nUpdates = randomIntBetween(minUpdates, maxUpdates); + logger.info("indexing docs with [{}] concurrent updates initially", nUpdates); + final int finalVersionForDoc1 = indexDocWithConcurrentUpdates(index, 1, nUpdates); + logger.info("allowing shards on all nodes"); + updateIndexSetting(index, 
Settings.builder().putNull("index.routing.allocation.include._name")); + ensureGreen(); + assertOK(client().performRequest("POST", index + "/_refresh")); + List shards = buildShards(nodes, newNodeClient); + for (Shard shard : shards) { + assertVersion(index, 1, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc1); + assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 1); + } + + nUpdates = randomIntBetween(minUpdates, maxUpdates); + logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates); + final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); + assertOK(client().performRequest("POST", index + "/_refresh")); + shards = buildShards(nodes, newNodeClient); + for (Shard shard : shards) { + assertVersion(index, 2, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc2); + assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 2); + } + + Shard primary = buildShards(nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); + logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); + updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); + ensureGreen(); + nUpdates = randomIntBetween(minUpdates, maxUpdates); + logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates); + final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates); + assertOK(client().performRequest("POST", index + "/_refresh")); + shards = buildShards(nodes, newNodeClient); + for (Shard shard : shards) { + assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); + assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 3); + } + + logger.info("setting number of replicas to 0"); + updateIndexSetting(index, Settings.builder().put("index.number_of_replicas", 0)); + ensureGreen(); + nUpdates = randomIntBetween(minUpdates, maxUpdates); + logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates); + final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates); + assertOK(client().performRequest("POST", index + "/_refresh")); + shards = buildShards(nodes, newNodeClient); + for (Shard shard : shards) { + assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); + assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 4); + } + + logger.info("setting number of replicas to 1"); + updateIndexSetting(index, Settings.builder().put("index.number_of_replicas", 1)); + ensureGreen(); + nUpdates = randomIntBetween(minUpdates, maxUpdates); + logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates); + final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates); + assertOK(client().performRequest("POST", index + "/_refresh")); + shards = buildShards(nodes, newNodeClient); + for (Shard shard : shards) { + assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); + assertCount(index, "_only_nodes:" + shard.getNode().getNodeName(), 5); + } + // the number of documents on the primary and on the recovered replica should match the number of indexed documents + assertCount(index, "_primary", 5); + assertCount(index, "_replica", 5); + } + } + public void testSeqNoCheckpoints() throws Exception 
{ Nodes nodes = buildNodeAndVersions(); assumeFalse("new nodes is empty", nodes.getNewNodes().isEmpty()); @@ -166,6 +276,14 @@ private void assertCount(final String index, final String preference, final int assertThat(actualCount, equalTo(expectedCount)); } + private void assertVersion(final String index, final int docId, final String preference, final int expectedVersion) throws IOException { + final Response response = client().performRequest("GET", index + "/test/" + docId, + Collections.singletonMap("preference", preference)); + assertOK(response); + final int actualVersion = Integer.parseInt(objectPath(response).evaluate("_version").toString()); + assertThat("version mismatch for doc [" + docId + "] preference [" + preference + "]", actualVersion, equalTo(expectedVersion)); + } + private void assertSeqNoOnShards(Nodes nodes, boolean checkGlobalCheckpoints, int numDocs, RestClient client) throws Exception { assertBusy(() -> { try { diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 385c547cf6dea..c6e34963af9dc 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -23,25 +23,33 @@ apply plugin: 'elasticsearch.standalone-test' task remoteClusterTest(type: RestIntegTestTask) { mustRunAfter(precommit) - cluster { - distribution = 'zip' - numNodes = 2 - clusterName = 'remote-cluster' - setting 'search.remote.connect', false - } +} + +remoteClusterTestCluster { + distribution = 'zip' + numNodes = 2 + clusterName = 'remote-cluster' + setting 'search.remote.connect', false +} + +remoteClusterTestRunner { systemProperty 'tests.rest.suite', 'remote_cluster' } task mixedClusterTest(type: RestIntegTestTask) { - dependsOn(remoteClusterTest) - cluster { - distribution = 'zip' - setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" - setting 'search.remote.connections_per_cluster', 1 - setting 'search.remote.connect', true - } + dependsOn(remoteClusterTestRunner) +} + +mixedClusterTestCluster { + distribution = 'zip' + setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" + setting 'search.remote.connections_per_cluster', 1 + setting 'search.remote.connect', true +} + +mixedClusterTestRunner { systemProperty 'tests.rest.suite', 'multi_cluster' - finalizedBy 'remoteClusterTest#node0.stop','remoteClusterTest#node1.stop' + finalizedBy 'remoteClusterTestCluster#node0.stop','remoteClusterTestCluster#node1.stop' } task integTest { diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 1d79bb404fdd9..f4cde58d75577 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -23,43 +23,55 @@ apply plugin: 'elasticsearch.standalone-test' task oldClusterTest(type: RestIntegTestTask) { mustRunAfter(precommit) - cluster { - distribution = 'zip' - bwcVersion = '5.4.0-SNAPSHOT' // TODO: either randomize, or make this settable with sysprop - numBwcNodes = 2 - numNodes = 2 - clusterName = 'rolling-upgrade' - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'http.content_type.required', 'true' - } +} + +oldClusterTestCluster { + distribution = 'zip' + bwcVersion = '5.4.0-SNAPSHOT' // TODO: either randomize, or make this settable with sysprop + numBwcNodes = 2 + numNodes = 2 + clusterName = 'rolling-upgrade' + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'http.content_type.required', 'true' +} + +oldClusterTestRunner { 
systemProperty 'tests.rest.suite', 'old_cluster' } task mixedClusterTest(type: RestIntegTestTask) { - dependsOn(oldClusterTest, 'oldClusterTest#node1.stop') - cluster { - distribution = 'zip' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - dataDir = "${-> oldClusterTest.nodes[1].dataDir}" - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - } + dependsOn(oldClusterTestRunner, 'oldClusterTestCluster#node1.stop') +} + +mixedClusterTestCluster { + distribution = 'zip' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } + dataDir = "${-> oldClusterTest.nodes[1].dataDir}" + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' +} + +mixedClusterTestRunner { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy 'oldClusterTest#node0.stop' + finalizedBy 'oldClusterTestCluster#node0.stop' } task upgradedClusterTest(type: RestIntegTestTask) { - dependsOn(mixedClusterTest, 'oldClusterTest#node0.stop') - cluster { - distribution = 'zip' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - dataDir = "${-> oldClusterTest.nodes[0].dataDir}" - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - } + dependsOn(mixedClusterTestRunner, 'oldClusterTestCluster#node0.stop') +} + +upgradedClusterTestCluster { + distribution = 'zip' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } + dataDir = "${-> oldClusterTest.nodes[0].dataDir}" + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' +} + +upgradedClusterTestRunner { systemProperty 'tests.rest.suite', 'upgraded_cluster' // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy 'mixedClusterTest#stop' + finalizedBy 'mixedClusterTestCluster#stop' } task integTest { diff --git a/qa/smoke-test-ingest-disabled/build.gradle b/qa/smoke-test-ingest-disabled/build.gradle index 4c4d9c2da1259..cf3ca9a713f96 100644 --- a/qa/smoke-test-ingest-disabled/build.gradle +++ b/qa/smoke-test-ingest-disabled/build.gradle @@ -24,8 +24,6 @@ dependencies { testCompile project(path: ':modules:ingest-common', configuration: 'runtime') } -integTest { - cluster { - setting 'node.ingest', 'false' - } +integTestCluster { + setting 'node.ingest', 'false' } diff --git a/qa/smoke-test-ingest-with-all-dependencies/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle index 2cfa3af434ed9..1608ce9c37c25 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/build.gradle +++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle @@ -28,10 +28,8 @@ dependencies { testCompile project(path: ':modules:reindex', configuration: 'runtime') } -integTest { - cluster { - plugin ':plugins:ingest-geoip' - setting 'script.inline', 'true' - setting 'path.scripts', "${project.buildDir}/resources/test/scripts" - } +integTestCluster { + plugin ':plugins:ingest-geoip' + setting 'script.inline', 'true' + setting 'path.scripts', "${project.buildDir}/resources/test/scripts" } diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index fc196fd52a4a4..5df77bd0d9513 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -22,7 +22,8 @@ apply plugin: 'elasticsearch.rest-test' 
integTest { includePackaged = true - cluster { - numNodes = 2 - } +} + +integTestCluster { + numNodes = 2 } diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index 6fd722e409c87..d60216dad194f 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -24,10 +24,8 @@ apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj -> - integTest { - cluster { - plugin subproj.path - } + integTestCluster { + plugin subproj.path } pluginsCount += 1 } diff --git a/qa/smoke-test-reindex-with-painless/build.gradle b/qa/smoke-test-reindex-with-painless/build.gradle index 7092c0a7b487a..b32f4ee80bef9 100644 --- a/qa/smoke-test-reindex-with-painless/build.gradle +++ b/qa/smoke-test-reindex-with-painless/build.gradle @@ -20,8 +20,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' -integTest { - cluster { - setting 'script.max_compilations_per_minute', '1000' - } +integTestCluster { + setting 'script.max_compilations_per_minute', '1000' } diff --git a/qa/smoke-test-tribe-node/build.gradle b/qa/smoke-test-tribe-node/build.gradle index 94789b17fdbd9..4ddb178068d2f 100644 --- a/qa/smoke-test-tribe-node/build.gradle +++ b/qa/smoke-test-tribe-node/build.gradle @@ -24,47 +24,32 @@ import org.elasticsearch.gradle.test.NodeInfo apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' -List oneNodes -task setupClusterOne(type: DefaultTask) { - mustRunAfter(precommit) - ClusterConfiguration configOne = new ClusterConfiguration(project) - configOne.clusterName = 'one' - configOne.setting('node.name', 'one') - oneNodes = ClusterFormationTasks.setup(project, setupClusterOne, configOne) -} - -List twoNodes +ClusterConfiguration configOne = new ClusterConfiguration(project) +configOne.clusterName = 'one' +configOne.setting('node.name', 'one') +List oneNodes = ClusterFormationTasks.setup(project, 'clusterOne', integTestRunner, configOne) -task setupClusterTwo(type: DefaultTask) { - mustRunAfter(precommit) - ClusterConfiguration configTwo = new ClusterConfiguration(project) - configTwo.clusterName = 'two' - configTwo.setting('node.name', 'two') - twoNodes = ClusterFormationTasks.setup(project, setupClusterTwo, configTwo) -} +ClusterConfiguration configTwo = new ClusterConfiguration(project) +configTwo.clusterName = 'two' +configTwo.setting('node.name', 'two') +List twoNodes = ClusterFormationTasks.setup(project, 'clusterTwo', integTestRunner, configTwo) -integTest { - dependsOn(setupClusterOne, setupClusterTwo) - cluster { - // tribe nodes had a bug where if explicit ports was specified for the tribe node, the dynamic socket permissions that were applied - // would not account for the fact that the internal node client needed to bind to sockets too; thus, we use explicit port ranges to - // ensure that the code that fixes this bug is exercised - setting 'http.port', '40200-40249' - setting 'transport.tcp.port', '40300-40349' - setting 'node.name', 'quest' - setting 'tribe.one.cluster.name', 'one' - setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'" - setting 'tribe.one.http.enabled', 'true' - setting 'tribe.one.http.port', '40250-40299' - setting 'tribe.one.transport.tcp.port', '40350-40399' - setting 'tribe.two.cluster.name', 'two' - setting 'tribe.two.discovery.zen.ping.unicast.hosts', "'${-> twoNodes.get(0).transportUri()}'" - setting 
'tribe.two.http.enabled', 'true' - setting 'tribe.two.http.port', '40250-40299' - setting 'tribe.two.transport.tcp.port', '40250-40399' - } - // need to kill the standalone nodes here - finalizedBy 'setupClusterOne#stop' - finalizedBy 'setupClusterTwo#stop' +integTestCluster { + // tribe nodes had a bug where if explicit ports was specified for the tribe node, the dynamic socket permissions that were applied + // would not account for the fact that the internal node client needed to bind to sockets too; thus, we use explicit port ranges to + // ensure that the code that fixes this bug is exercised + setting 'http.port', '40200-40249' + setting 'transport.tcp.port', '40300-40349' + setting 'node.name', 'quest' + setting 'tribe.one.cluster.name', 'one' + setting 'tribe.one.discovery.zen.ping.unicast.hosts', "'${-> oneNodes.get(0).transportUri()}'" + setting 'tribe.one.http.enabled', 'true' + setting 'tribe.one.http.port', '40250-40299' + setting 'tribe.one.transport.tcp.port', '40350-40399' + setting 'tribe.two.cluster.name', 'two' + setting 'tribe.two.discovery.zen.ping.unicast.hosts', "'${-> twoNodes.get(0).transportUri()}'" + setting 'tribe.two.http.enabled', 'true' + setting 'tribe.two.http.port', '40250-40299' + setting 'tribe.two.transport.tcp.port', '40250-40399' } diff --git a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats index a14823a9cc458..9f5bf9c502456 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats @@ -58,13 +58,13 @@ setup() { } @test "[UPGRADE] index some documents into a few indexes" { - curl -s -XPOST localhost:9200/library/book/1?pretty -d '{ + curl -s -H "Content-Type: application/json" -XPOST localhost:9200/library/book/1?pretty -d '{ "title": "Elasticsearch - The Definitive Guide" }' - curl -s -XPOST localhost:9200/library/book/2?pretty -d '{ + curl -s -H "Content-Type: application/json" -XPOST localhost:9200/library/book/2?pretty -d '{ "title": "Brave New World" }' - curl -s -XPOST localhost:9200/library2/book/1?pretty -d '{ + curl -s -H "Content-Type: application/json" -XPOST localhost:9200/library2/book/1?pretty -d '{ "title": "The Left Hand of Darkness" }' } diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 7b226922814ea..143430a542fd2 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -481,12 +481,12 @@ run_elasticsearch_tests() { [ "$status" -eq 0 ] echo "$output" | grep -w "green" - curl -s -XPOST 'http://localhost:9200/library/book/1?refresh=true&pretty' -d '{ + curl -s -H "Content-Type: application/json" -XPOST 'http://localhost:9200/library/book/1?refresh=true&pretty' -d '{ "title": "Book #1", "pages": 123 }' - curl -s -XPOST 'http://localhost:9200/library/book/2?refresh=true&pretty' -d '{ + curl -s -H "Content-Type: application/json" -XPOST 'http://localhost:9200/library/book/2?refresh=true&pretty' -d '{ "title": "Book #2", "pages": 456 }' @@ -494,7 +494,7 @@ run_elasticsearch_tests() { curl -s -XGET 'http://localhost:9200/_count?pretty' | grep \"count\"\ :\ 2 - curl -s -XPOST 'http://localhost:9200/library/book/_count?pretty' -d '{ + curl -s -H "Content-Type: application/json" -XPOST 'http://localhost:9200/library/book/_count?pretty' -d '{ "query": { "script": { "script": { @@ -508,7 +508,7 @@ run_elasticsearch_tests() { 
} }' | grep \"count\"\ :\ 2 - curl -s -XGET 'http://localhost:9200/library/book/_search/template?pretty' -d '{ + curl -s -H "Content-Type: application/json" -XGET 'http://localhost:9200/library/book/_search/template?pretty' -d '{ "file": "is_guide" }' | grep \"total\"\ :\ 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json new file mode 100644 index 0000000000000..3e561a21146e6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -0,0 +1,71 @@ +{ + "exists_source": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "methods": ["HEAD"], + "url": { + "path": "/{index}/{type}/{id}/_source", + "paths": ["/{index}/{type}/{id}/_source"], + "parts": { + "id": { + "type" : "string", + "required" : true, + "description" : "The document ID" + }, + "index": { + "type" : "string", + "required" : true, + "description" : "The name of the index" + }, + "type": { + "type" : "string", + "required" : true, + "description" : "The type of the document; use `_all` to fetch the first document matching the ID across all types" + } + }, + "params": { + "parent": { + "type" : "string", + "description" : "The ID of the parent document" + }, + "preference": { + "type" : "string", + "description" : "Specify the node or shard the operation should be performed on (default: random)" + }, + "realtime": { + "type" : "boolean", + "description" : "Specify whether to perform the operation in realtime or search mode" + }, + "refresh": { + "type" : "boolean", + "description" : "Refresh the shard containing the document before performing the operation" + }, + "routing": { + "type" : "string", + "description" : "Specific routing value" + }, + "_source": { + "type" : "list", + "description" : "True or false to return the _source field or not, or a list of fields to return" + }, + "_source_exclude": { + "type" : "list", + "description" : "A list of fields to exclude from the returned _source field" + }, + "_source_include": { + "type" : "list", + "description" : "A list of fields to extract and return from the _source field" + }, + "version" : { + "type" : "number", + "description" : "Explicit version number for concurrency control" + }, + "version_type": { + "type" : "enum", + "options" : ["internal", "external", "external_gte", "force"], + "description" : "Specific version type" + } + } + }, + "body": null + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json index 4ab053cd11873..7f04f00b40def 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json @@ -1,27 +1,44 @@ { "indices.exists": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html", - "methods": ["HEAD"], + "methods": [ "HEAD" ], "url": { "path": "/{index}", - "paths": ["/{index}"], + "paths": [ "/{index}" ], "parts": { "index": { - "type" : "list", - "required" : true, - "description" : "A comma-separated list of indices to check" + "type": "list", + "required": true, + "description": "A comma-separated list of index names" } }, "params": { + "local": { + "type": "boolean", + "description": "Return local information, do not retrieve the state from master node (default: false)" + }, + "ignore_unavailable": { + "type": 
"boolean", + "description": "Ignore unavailable indexes (default: false)" + }, + "allow_no_indices": { + "type": "boolean", + "description": "Ignore if a wildcard expression resolves to no concrete indices (default: false)" + }, "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + "type": "enum", + "options": [ "open", "closed", "none", "all" ], + "default": "open", + "description": "Whether wildcard expressions should get expanded to open or closed indices (default: open)" }, - "local": { - "type": "boolean", - "description": "Return local information, do not retrieve the state from master node (default: false)" + "flat_settings": { + "type": "boolean", + "description": "Return settings in flat format (default: false)" + }, + "include_defaults": { + "type": "boolean", + "description": "Whether to return all default setting for each of the indices.", + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index 8862481c18fbc..8891aebd223ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -4,7 +4,7 @@ "methods": ["HEAD"], "url": { "path": "/_alias/{name}", - "paths": ["/_alias/{name}", "/{index}/_alias/{name}", "/{index}/_alias"], + "paths": ["/_alias/{name}", "/{index}/_alias/{name}"], "parts": { "index": { "type" : "list", @@ -17,22 +17,22 @@ }, "params": { "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" }, "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : ["open", "closed"], - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "all", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
}, "local": { - "type": "boolean", - "description": "Return local information, do not retrieve the state from master node (default: false)" + "type": "boolean", + "description": "Return local information, do not retrieve the state from master node (default: false)" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json index 18684bc940856..96c4c53cd9dcf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json @@ -4,15 +4,19 @@ "methods": ["HEAD"], "url": { "path": "/_template/{name}", - "paths": ["/_template/{name}"], + "paths": [ "/_template/{name}" ], "parts": { "name": { - "type": "string", - "required": true, - "description": "The name of the template" + "type": "list", + "required": false, + "description": "The comma separated names of the index templates" } }, "params": { + "flat_settings": { + "type": "boolean", + "description": "Return settings in flat format (default: false)" + }, "master_timeout": { "type": "time", "description": "Explicit operation timeout for connection to master node" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 328383e16399f..dc5fda57439f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -158,6 +158,11 @@ "request_cache": { "type" : "boolean", "description" : "Specify if request cache should be used for this request or not, defaults to index level setting" + }, + "batched_reduce_size" : { + "type" : "number", + "description" : "The number of shard results that should be reduced at once on the coordinating node. 
This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large.", + "default" : 512 } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/10_basic.yaml index cc9fb95b78414..043800916d19a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_settings/10_basic.yaml @@ -36,7 +36,7 @@ setup: ignore_unavailable: true index: test-index, non-existing body: - number_of_replicas: 1 + index.number_of_replicas: 1 - do: indices.get_settings: {} @@ -81,7 +81,6 @@ setup: indices.get_settings: flat_settings: false - - match: test-index.settings.index.number_of_replicas: "0" - match: @@ -96,8 +95,9 @@ setup: preserve_existing: true index: test-index body: - index.translog.durability: "request" - index.query_string.lenient: "true" + settings: + index.translog.durability: "request" + index.query_string.lenient: "true" - do: indices.get_settings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yaml new file mode 100644 index 0000000000000..7444b1bba17b4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yaml @@ -0,0 +1,70 @@ +setup: + - do: + indices.create: + index: test_1 + body: + settings: + number_of_shards: 5 + number_of_replicas: 0 + mappings: + test: + properties: + str: + type: keyword + +--- +"batched_reduce_size lower limit": + - skip: + version: " - 5.3.99" + reason: this was added in 5.4.0 + - do: + catch: /batchedReduceSize must be >= 2/ + search: + index: test_1 + batched_reduce_size: 1 + + +--- +"batched_reduce_size 2 with 5 shards": + - skip: + version: " - 5.3.99" + reason: this was added in 5.4.0 + - do: + index: + index: test_1 + type: test + id: 1 + body: { "str" : "abc" } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { "str": "abc" } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { "str": "bcd" } + - do: + indices.refresh: {} + + - do: + search: + batched_reduce_size: 2 + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str" } } } } + + - match: { num_reduce_phases: 4 } + - match: { hits.total: 3 } + - length: { aggregations.str_terms.buckets: 2 } + - match: { aggregations.str_terms.buckets.0.key: "abc" } + - is_false: aggregations.str_terms.buckets.0.key_as_string + - match: { aggregations.str_terms.buckets.0.doc_count: 2 } + - match: { aggregations.str_terms.buckets.1.key: "bcd" } + - is_false: aggregations.str_terms.buckets.1.key_as_string + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + + diff --git a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java index b1aa781f1a8db..1ea09ca76f8d5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java @@ -152,7 +152,15 @@ public static Tuple, List> randomStoredFieldValues(Random r */ public static BytesReference randomSource(Random random) { //the source can be stored in any format and eventually converted when retrieved depending on the format of the response - 
XContentType xContentType = RandomPicks.randomFrom(random, XContentType.values()); + return randomSource(random, RandomPicks.randomFrom(random, XContentType.values())); + } + + /** + * Returns a random source in a given XContentType containing a random number of fields, objects and array, with maximum depth 5. + * + * @param random Random generator + */ + public static BytesReference randomSource(Random random, XContentType xContentType) { try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) { builder.startObject(); addFields(random, builder, 0); diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java index 5814cac1316bd..a7d9a72e6b77c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java @@ -36,6 +36,7 @@ public class RandomizingClient extends FilterClient { private final SearchType defaultSearchType; private final String defaultPreference; + private final int batchedReduceSize; public RandomizingClient(Client client, Random random) { @@ -53,12 +54,14 @@ public RandomizingClient(Client client, Random random) { } else { defaultPreference = null; } + this.batchedReduceSize = 2 + random.nextInt(10); } - + @Override public SearchRequestBuilder prepareSearch(String... indices) { - return in.prepareSearch(indices).setSearchType(defaultSearchType).setPreference(defaultPreference); + return in.prepareSearch(indices).setSearchType(defaultSearchType).setPreference(defaultPreference) + .setBatchedReduceSize(batchedReduceSize); } @Override