diff --git a/TESTING.asciidoc b/TESTING.asciidoc index a3f92d5db1232..6389f8cb3038d 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -42,7 +42,7 @@ In order to start with a different distribution use the `-Drun.distribution` arg To for example start the open source distribution: ------------------------------------- -./gradlew run -Drun.distribution=oss-zip +./gradlew run -Drun.distribution=oss ------------------------------------- ==== License type diff --git a/build.gradle b/build.gradle index 439bd32727d18..dade656f78b82 100644 --- a/build.gradle +++ b/build.gradle @@ -221,14 +221,14 @@ allprojects { "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', - "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip', - "org.elasticsearch.distribution.zip:elasticsearch-oss:${version}": ':distribution:archives:oss-zip', - "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:archives:tar', - "org.elasticsearch.distribution.tar:elasticsearch-oss:${version}": ':distribution:archives:oss-tar', - "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:packages:rpm', - "org.elasticsearch.distribution.rpm:elasticsearch-oss:${version}": ':distribution:packages:oss-rpm', - "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:packages:deb', - "org.elasticsearch.distribution.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', + "downloads.zip:elasticsearch:${version}": ':distribution:archives:zip', + "downloads.zip:elasticsearch-oss:${version}": ':distribution:archives:oss-zip', + "downloads.tar:elasticsearch:${version}": ':distribution:archives:tar', + "downloads.tar:elasticsearch-oss:${version}": ':distribution:archives:oss-tar', + "downloads.rpm:elasticsearch:${version}": ':distribution:packages:rpm', + "downloads.rpm:elasticsearch-oss:${version}": ':distribution:packages:oss-rpm', + "downloads.deb:elasticsearch:${version}": ':distribution:packages:deb', + "downloads.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', "org.elasticsearch.xpack.test:feature-aware:${version}": ':x-pack:test:feature-aware', // for transport client @@ -244,13 +244,13 @@ allprojects { bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> Version unreleased = unreleasedVersion.version String snapshotProject = ":distribution:bwc:${unreleasedVersion.gradleProjectName}" - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.deb:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.rpm:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.zip:elasticsearch:${unreleased}"] = snapshotProject if (unreleased.onOrAfter('6.3.0')) { - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${unreleased}"] = snapshotProject - 
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.deb:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["downloads.zip:elasticsearch-oss:${unreleased}"] = snapshotProject } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index b9ff1fd485e05..b98cdd8788bd4 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -178,6 +178,21 @@ if (project != rootProject) { jarHell.enabled = false thirdPartyAudit.enabled = false + configurations { + distribution + } + + dependencies { + distribution project(':distribution:archives:zip') + distribution project(':distribution:archives:oss-zip') + } + + String localDownloads = "${rootProject.buildDir}/local-downloads" + task setupLocalDownloads(type:Copy) { + from configurations.distribution + into localDownloads + } + unitTest { // The test task is configured to runtimeJava version, but build-tools doesn't support all of them, so test // with compiler instead on the ones that are too old. @@ -192,6 +207,7 @@ if (project != rootProject) { dependsOn project.rootProject.allprojects.collect { it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} } + dependsOn setupLocalDownloads exclude "**/*Tests.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath @@ -204,6 +220,7 @@ if (project != rootProject) { ).asPath, ) systemProperty 'test.local-test-repo-path', "${rootProject.buildDir}/local-test-repo" + systemProperty 'test.local-test-downloads-path', localDownloads systemProperty 'test.version_under_test', version systemProperty 'test.lucene-snapshot-revision', (versions.lucene =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c2741ed5819f4..af207bcae7ca8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -407,9 +407,16 @@ class BuildPlugin implements Plugin { if (messages.isEmpty() == false) { throw new GradleException(messages.join('\n')) } + rootProject.rootProject.ext.requiredJavaVersions = null // reset to null to indicate the pre-execution checks have executed } + } else if (rootProject.rootProject.requiredJavaVersions == null) { + // check directly if the version is present since we are already executing + if (rootProject.javaVersions.get(version) == null) { + throw new GradleException("JAVA${version}_HOME required to run task:\n${task}") + } + } else { + rootProject.requiredJavaVersions.get(version).add(task) } - rootProject.requiredJavaVersions.get(version).add(task) } /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ @@ -558,6 +565,12 @@ class BuildPlugin implements Plugin { repos.mavenLocal() } repos.jcenter() + repos.ivy { + url "https://artifacts.elastic.co/downloads" + patternLayout { + artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]" + } + } repos.maven { name "elastic" url "https://artifacts.elastic.co/maven" diff --git 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 28566662b4674..15cef3f472817 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -34,7 +34,7 @@ public class DocsTestPlugin extends RestTestPlugin { project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) // The distribution can be configured with -Dtests.distribution on the command line - project.integTestCluster.distribution = System.getProperty('tests.distribution', 'zip') + project.integTestCluster.distribution = System.getProperty('tests.distribution', 'default') // Docs are published separately so no need to assemble project.tasks.assemble.enabled = false Map defaultSubstitutions = [ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 91cee78496e63..0a53787c10597 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -29,7 +29,7 @@ class ClusterConfiguration { private final Project project @Input - String distribution = 'zip' + String distribution = 'default' @Input int numNodes = 1 diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index d87e33166e74d..72041b4e628d3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -18,20 +18,17 @@ */ package org.elasticsearch.gradle.test -import java.util.stream.Collectors import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties - import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension import org.gradle.api.AntBuilder import org.gradle.api.DefaultTask import org.gradle.api.GradleException -import org.gradle.api.InvalidUserDataException import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.artifacts.Configuration @@ -45,7 +42,7 @@ import org.gradle.api.tasks.Exec import java.nio.charset.StandardCharsets import java.nio.file.Paths import java.util.concurrent.TimeUnit - +import java.util.stream.Collectors /** * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. 
*/ @@ -89,7 +86,7 @@ class ClusterFormationTasks { Configuration currentDistro = project.configurations.create("${prefix}_elasticsearchDistro") Configuration bwcDistro = project.configurations.create("${prefix}_elasticsearchBwcDistro") Configuration bwcPlugins = project.configurations.create("${prefix}_elasticsearchBwcPlugins") - if (System.getProperty('tests.distribution', 'oss-zip') == 'integ-test-zip') { + if (System.getProperty('tests.distribution', 'oss') == 'integ-test-zip') { throw new Exception("tests.distribution=integ-test-zip is not supported") } configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) @@ -174,24 +171,31 @@ class ClusterFormationTasks { /** Adds a dependency on the given distribution */ static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { - if (Version.fromString(elasticsearchVersion).before('6.3.0') && - distro.startsWith('oss-') - ) { + // TEMP HACK + // The oss docs CI build overrides the distro on the command line. This hack handles backcompat until CI is updated. + if (distro.equals('oss-zip')) { + distro = 'oss' + } + if (distro.equals('zip')) { + distro = 'default' + } + // END TEMP HACK + if (['integ-test-zip', 'oss', 'default'].contains(distro) == false) { + throw new GradleException("Unknown distribution: ${distro} in project ${project.path}") + } + Version version = Version.fromString(elasticsearchVersion) + if (version.before('6.3.0') && distro.startsWith('oss-')) { distro = distro.substring('oss-'.length()) } - String packaging = distro - if (distro.contains('tar')) { - packaging = 'tar.gz' - } else if (distro.contains('zip')) { - packaging = 'zip' + String group = "downloads.zip" + if (distro.equals("integ-test-zip")) { group = "org.elasticsearch.distribution.integ-test-zip" } - String subgroup = distro String artifactName = 'elasticsearch' - if (distro.contains('oss')) { + if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { artifactName += '-oss' - subgroup = distro.substring('oss-'.length()) } - project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${subgroup}:${artifactName}:${elasticsearchVersion}@${packaging}") + project.dependencies.add(configuration.name, "${group}:${artifactName}:${elasticsearchVersion}@zip") } /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ @@ -314,31 +318,13 @@ class ClusterFormationTasks { elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. If it isn't then Bad Things(TM) will happen.
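For context on the new coordinates: a `downloads.<packaging>` dependency resolves either to a local checkout project (via the `projectSubstitutions` in build.gradle above) or to the ivy repository this change adds to BuildPlugin. A minimal standalone sketch of that resolution, assuming a Gradle version of this era; the `esDistro` configuration name is made up for illustration, and on Gradle 6+ an explicit `metadataSources { artifact() }` would also be needed:

-------------------------------------
// Illustrative sketch only; not part of this PR.
repositories {
    ivy {
        // The same repository and pattern that BuildPlugin registers in this change.
        url "https://artifacts.elastic.co/downloads"
        patternLayout {
            artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]"
        }
    }
}

configurations {
    esDistro // hypothetical configuration
}

dependencies {
    // The "downloads.zip" group contributes nothing to the URL: the pattern above
    // only uses [module], [revision], [classifier] and [ext], so this coordinate
    // resolves to
    //   https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.0.0.zip
    // unless the projectSubstitutions swap in the local :distribution:archives:oss-zip project.
    esDistro "downloads.zip:elasticsearch-oss:7.0.0@zip"
}
-------------------------------------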
*/ - Task extract - - switch (node.config.distribution) { - case 'integ-test-zip': - case 'zip': - case 'oss-zip': - extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { - project.zipTree(configuration.singleFile) - } - into node.baseDir - } - break; - case 'tar': - case 'oss-tar': - extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { - project.tarTree(project.resources.gzip(configuration.singleFile)) - } - into node.baseDir - } - break; - default: - throw new InvalidUserDataException("Unknown distribution: ${node.config.distribution}") + Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { + from { + project.zipTree(configuration.singleFile) + } + into node.baseDir } + return extract } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index cd99950902e55..63af1dda03c3d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -29,9 +29,6 @@ import org.gradle.api.Project import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths - -import static org.elasticsearch.gradle.BuildPlugin.getJavaHome - /** * A container for the files and configuration associated with a single node in a test cluster. */ @@ -125,8 +122,8 @@ class NodeInfo { baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') this.nodeVersion = Version.fromString(nodeVersion) - homeDir = homeDir(baseDir, config.distribution, nodeVersion) - pathConf = pathConf(baseDir, config.distribution, nodeVersion) + homeDir = new File(baseDir, "elasticsearch-${nodeVersion}") + pathConf = new File(homeDir, 'config') if (config.dataDir != null) { dataDir = "${config.dataDir(nodeNum)}" } else { @@ -299,41 +296,4 @@ class NodeInfo { } return dataDir } - - /** Returns the directory elasticsearch home is contained in for the given distribution */ - static File homeDir(File baseDir, String distro, String nodeVersion) { - String path - switch (distro) { - case 'integ-test-zip': - case 'zip': - case 'tar': - case 'oss-zip': - case 'oss-tar': - path = "elasticsearch-${nodeVersion}" - break - case 'rpm': - case 'deb': - path = "${distro}-extracted/usr/share/elasticsearch" - break - default: - throw new InvalidUserDataException("Unknown distribution: ${distro}") - } - return new File(baseDir, path) - } - - static File pathConf(File baseDir, String distro, String nodeVersion) { - switch (distro) { - case 'integ-test-zip': - case 'zip': - case 'oss-zip': - case 'tar': - case 'oss-tar': - return new File(homeDir(baseDir, distro, nodeVersion), 'config') - case 'rpm': - case 'deb': - return new File(baseDir, "${distro}-extracted/etc/elasticsearch") - default: - throw new InvalidUserDataException("Unknown distribution: ${distro}") - } - } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index cdadc78e9ae0e..c34b2f00f4c65 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -16,7 +16,7 @@ public class RunTask extends DefaultTask { clusterConfig.httpPort = 9200 clusterConfig.transportPort = 9300 clusterConfig.daemonize = false - 
clusterConfig.distribution = 'zip' + clusterConfig.distribution = 'default' project.afterEvaluate { ClusterFormationTasks.setup(project, name, this, clusterConfig) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index aabd0cf119285..dfb825afea255 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -193,10 +193,10 @@ class VagrantTestPlugin implements Plugin { UPGRADE_FROM_ARCHIVES.each { // The version of elasticsearch that we upgrade *from* project.dependencies.add(PACKAGING_CONFIGURATION, - "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}") + "downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}") if (upgradeFromVersion.onOrAfter('6.3.0')) { project.dependencies.add(PACKAGING_CONFIGURATION, - "org.elasticsearch.distribution.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + "downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index fa4415bbe1e91..6cb8f2bd2f220 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -324,8 +324,9 @@ private void configure() { getConfPathSharedData().mkdirs(); getConfPathLogs().mkdirs(); LinkedHashMap config = new LinkedHashMap<>(); - config.put("cluster.name", "cluster-" + safeName(name)); - config.put("node.name", "node-" + safeName(name)); + String nodeName = safeName(name); + config.put("cluster.name",nodeName); + config.put("node.name", nodeName); config.put("path.repo", getConfPathRepo().getAbsolutePath()); config.put("path.data", getConfPathData().getAbsolutePath()); config.put("path.logs", getConfPathLogs().getAbsolutePath()); @@ -342,6 +343,9 @@ private void configure() { if (Version.fromString(version).getMajor() >= 6) { config.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); } + if (Version.fromString(version).getMajor() >= 7) { + config.put("cluster.initial_master_nodes", "[" + nodeName + "]"); + } try { Files.write( getConfigFile().toPath(), diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index aca9906701150..c0632873eca8a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -127,6 +127,9 @@ private void adaptBuildScriptForTest() throws IOException { " maven {\n" + " url \"" + getLocalTestRepoPath() + "\"\n" + " }\n" + + " flatDir {\n" + + " dir '" + getLocalTestDownloadsPath() + "'\n" + + " }\n" + luceneSnapshotRepo + "}\n" ); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index 3e1d0b176b011..f7a0382cec775 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -144,8 +144,16 @@ protected void 
assertBuildFileDoesNotExists(BuildResult result, String projectNa } protected String getLocalTestRepoPath() { - String property = System.getProperty("test.local-test-repo-path"); - Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + return getLocalTestPath("test.local-test-repo-path"); + } + + protected String getLocalTestDownloadsPath() { + return getLocalTestPath("test.local-test-downloads-path"); + } + + private String getLocalTestPath(String propertyName) { + String property = System.getProperty(propertyName); + Objects.requireNonNull(property, propertyName + " not passed to tests"); File file = new File(property); assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); if (File.separator.equals("\\")) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 514f75eaa86e9..ee366ac7b7c65 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,11 +21,9 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; -import org.junit.Ignore; import java.util.Arrays; -@Ignore // https://github.com/elastic/elasticsearch/issues/37218 public class TestClustersPluginIT extends GradleIntegrationTestCase { public void testListClusters() { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 40380af0823b6..778f29686ad67 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0 -lucene = 8.0.0-snapshot-a1c6e642aa +lucene = 8.0.0-snapshot-83f9835 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/AbstractIndicesPrivileges.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/AbstractIndicesPrivileges.java index 50d80d04bd40c..4187ca7638d4e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/AbstractIndicesPrivileges.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/AbstractIndicesPrivileges.java @@ -39,14 +39,16 @@ public abstract class AbstractIndicesPrivileges { static final ParseField NAMES = new ParseField("names"); + static final ParseField ALLOW_RESTRICTED_INDICES = new ParseField("allow_restricted_indices"); static final ParseField PRIVILEGES = new ParseField("privileges"); static final ParseField FIELD_PERMISSIONS = new ParseField("field_security"); static final ParseField QUERY = new ParseField("query"); protected final Set indices; protected final Set privileges; + protected final boolean allowRestrictedIndices; - AbstractIndicesPrivileges(Collection indices, Collection privileges) { + AbstractIndicesPrivileges(Collection indices, Collection privileges, boolean allowRestrictedIndices) { if (null == indices || indices.isEmpty()) { throw new IllegalArgumentException("indices privileges must refer to at least one index name or index name pattern"); } @@ -55,6 +57,7 @@ public abstract class AbstractIndicesPrivileges { } this.indices = Collections.unmodifiableSet(new HashSet<>(indices)); this.privileges = Collections.unmodifiableSet(new HashSet<>(privileges)); + 
this.allowRestrictedIndices = allowRestrictedIndices; } /** @@ -73,6 +76,15 @@ public Set getPrivileges() { return this.privileges; } + /** + * True if the privileges cover restricted internal indices too. Certain indices are reserved for internal services and should be + * transparent to ordinary users. For that matter, when granting privileges, you also have to toggle this flag to confirm that all + * indices, including restricted ones, are in the scope of this permission. By default this is false. + */ + public boolean allowRestrictedIndices() { + return this.allowRestrictedIndices; + } + /** * If {@code true} some documents might not be visible. Only the documents * matching {@code query} will be readable. diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java index 73b383588c6e6..26c303304cd66 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java @@ -50,14 +50,16 @@ public final class IndicesPrivileges extends AbstractIndicesPrivileges implement int i = 0; final Collection indices = (Collection) constructorObjects[i++]; final Collection privileges = (Collection) constructorObjects[i++]; + final boolean allowRestrictedIndices = (Boolean) constructorObjects[i++]; final FieldSecurity fields = (FieldSecurity) constructorObjects[i++]; final String query = (String) constructorObjects[i]; - return new IndicesPrivileges(indices, privileges, fields, query); + return new IndicesPrivileges(indices, privileges, allowRestrictedIndices, fields, query); }); static { PARSER.declareStringArray(constructorArg(), NAMES); PARSER.declareStringArray(constructorArg(), PRIVILEGES); + PARSER.declareBoolean(constructorArg(), ALLOW_RESTRICTED_INDICES); PARSER.declareObject(optionalConstructorArg(), FieldSecurity::parse, FIELD_PERMISSIONS); PARSER.declareStringOrNull(optionalConstructorArg(), QUERY); } @@ -66,9 +68,9 @@ public final class IndicesPrivileges extends AbstractIndicesPrivileges implement // missing query means all documents, i.e. 
no restrictions private final @Nullable String query; - private IndicesPrivileges(Collection indices, Collection privileges, @Nullable FieldSecurity fieldSecurity, - @Nullable String query) { - super(indices, privileges); + private IndicesPrivileges(Collection indices, Collection privileges, boolean allowRestrictedIndices, + @Nullable FieldSecurity fieldSecurity, @Nullable String query) { + super(indices, privileges, allowRestrictedIndices); this.fieldSecurity = fieldSecurity; this.query = query; } @@ -118,13 +120,14 @@ public boolean equals(Object o) { IndicesPrivileges that = (IndicesPrivileges) o; return indices.equals(that.indices) && privileges.equals(that.privileges) + && allowRestrictedIndices == that.allowRestrictedIndices && Objects.equals(this.fieldSecurity, that.fieldSecurity) && Objects.equals(query, that.query); } @Override public int hashCode() { - return Objects.hash(indices, privileges, fieldSecurity, query); + return Objects.hash(indices, privileges, allowRestrictedIndices, fieldSecurity, query); } @Override @@ -141,6 +144,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAMES.getPreferredName(), indices); builder.field(PRIVILEGES.getPreferredName(), privileges); + builder.field(ALLOW_RESTRICTED_INDICES.getPreferredName(), allowRestrictedIndices); if (fieldSecurity != null) { builder.field(FIELD_PERMISSIONS.getPreferredName(), fieldSecurity, params); } @@ -170,6 +174,7 @@ public static final class Builder { Collection deniedFields = null; private @Nullable String query = null; + boolean allowRestrictedIndices = false; public Builder() { } @@ -223,6 +228,11 @@ public Builder query(@Nullable String query) { return this; } + public Builder allowRestrictedIndices(boolean allow) { + this.allowRestrictedIndices = allow; + return this; + } + public IndicesPrivileges build() { final FieldSecurity fieldSecurity; if (grantedFields == null && deniedFields == null) { @@ -230,7 +240,7 @@ public IndicesPrivileges build() { } else { fieldSecurity = new FieldSecurity(grantedFields, deniedFields); } - return new IndicesPrivileges(indices, privileges, fieldSecurity, query); + return new IndicesPrivileges(indices, privileges, allowRestrictedIndices, fieldSecurity, query); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/UserIndicesPrivileges.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/UserIndicesPrivileges.java index 9da2717831c1f..9878bb0071740 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/UserIndicesPrivileges.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/UserIndicesPrivileges.java @@ -52,6 +52,7 @@ public class UserIndicesPrivileges extends AbstractIndicesPrivileges { static { PARSER.declareStringArray(constructorArg(), IndicesPrivileges.NAMES); PARSER.declareStringArray(constructorArg(), IndicesPrivileges.PRIVILEGES); + PARSER.declareBoolean(constructorArg(), IndicesPrivileges.ALLOW_RESTRICTED_INDICES); PARSER.declareObjectArray(optionalConstructorArg(), IndicesPrivileges.FieldSecurity::parse, IndicesPrivileges.FIELD_PERMISSIONS); PARSER.declareStringArray(optionalConstructorArg(), IndicesPrivileges.QUERY); } @@ -61,8 +62,9 @@ private static UserIndicesPrivileges buildObjectFromParserArgs(Object[] args) { return new UserIndicesPrivileges( (List) args[0], (List) args[1], - (List) args[2], - (List) args[3] + 
(Boolean) args[2], + (List) args[3], + (List) args[4] ); } @@ -70,21 +72,13 @@ public static UserIndicesPrivileges fromXContent(XContentParser parser) throws I return PARSER.parse(parser, null); } - public UserIndicesPrivileges(Collection indices, Collection privileges, + public UserIndicesPrivileges(Collection indices, Collection privileges, boolean allowRestrictedIndices, Collection fieldSecurity, Collection query) { - super(indices, privileges); + super(indices, privileges, allowRestrictedIndices); this.fieldSecurity = fieldSecurity == null ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(fieldSecurity)); this.query = query == null ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(query)); } - public Set getIndices() { - return indices; - } - - public Set getPrivileges() { - return privileges; - } - public Set getFieldSecurity() { return fieldSecurity; } @@ -114,13 +108,14 @@ public boolean equals(Object o) { final UserIndicesPrivileges that = (UserIndicesPrivileges) o; return Objects.equals(indices, that.indices) && Objects.equals(privileges, that.privileges) && + allowRestrictedIndices == that.allowRestrictedIndices && Objects.equals(fieldSecurity, that.fieldSecurity) && Objects.equals(query, that.query); } @Override public int hashCode() { - return Objects.hash(indices, privileges, fieldSecurity, query); + return Objects.hash(indices, privileges, allowRestrictedIndices, fieldSecurity, query); } @Override @@ -128,6 +123,7 @@ public String toString() { return "UserIndexPrivilege{" + "indices=" + indices + ", privileges=" + privileges + + ", allow_restricted_indices=" + allowRestrictedIndices + ", fieldSecurity=" + fieldSecurity + ", query=" + query + '}'; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index 900f4210a9952..d9bd606167370 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -393,7 +393,8 @@ public void testPutRole() throws IOException { final List indicesPrivilegeDeniedName = Arrays.asList(randomArray(3, String[]::new, () -> randomAlphaOfLength(5))); final String indicesPrivilegeQuery = randomAlphaOfLengthBetween(0, 7); final IndicesPrivileges indicesPrivilege = IndicesPrivileges.builder().indices(indicesName).privileges(indicesPrivilegeName) - .grantedFields(indicesPrivilegeGrantedName).deniedFields(indicesPrivilegeDeniedName).query(indicesPrivilegeQuery).build(); + .allowRestrictedIndices(randomBoolean()).grantedFields(indicesPrivilegeGrantedName).deniedFields(indicesPrivilegeDeniedName) + .query(indicesPrivilegeQuery).build(); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); final Map expectedParams; if (refreshPolicy != RefreshPolicy.NONE) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index b05c7a0dde368..1f6373aff6a8c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -630,6 +630,22 @@ public void onFailure(Exception e) { public void 
testGetFollowStats() throws Exception { RestHighLevelClient client = highLevelClient(); + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + { + // Follow index, so that we can query for follow stats: + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower"); + PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + } + // tag::ccr-get-follow-stats-request FollowStatsRequest request = new FollowStatsRequest("follower"); // <1> @@ -671,6 +687,12 @@ public void onFailure(Exception e) { // end::ccr-get-follow-stats-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); + AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + } } static Map toMap(Response response) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index c8220e9cc0c05..f5ec0e2c885b4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -742,8 +742,10 @@ public void testHasPrivileges() throws Exception { HasPrivilegesRequest request = new HasPrivilegesRequest( Sets.newHashSet("monitor", "manage"), Sets.newHashSet( - IndicesPrivileges.builder().indices("logstash-2018-10-05").privileges("read", "write").build(), - IndicesPrivileges.builder().indices("logstash-2018-*").privileges("read").build() + IndicesPrivileges.builder().indices("logstash-2018-10-05").privileges("read", "write") + .allowRestrictedIndices(false).build(), + IndicesPrivileges.builder().indices("logstash-2018-*").privileges("read") + .allowRestrictedIndices(true).build() ), null ); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java index 40b38e40b2647..aff64533ece0f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.indices; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.RandomCreateIndexGenerator; @@ -26,6 +27,7 @@ import java.io.IOException; +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37654") public class PutMappingRequestTests extends AbstractXContentTestCase 
{ @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRolesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRolesResponseTests.java index 224b4d59d250d..c4620fa1a2f3d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRolesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRolesResponseTests.java @@ -48,6 +48,7 @@ public void testFromXContent() throws IOException { " {\n" + " \"names\" : [ \"index1\", \"index2\" ],\n" + " \"privileges\" : [ \"all\" ],\n" + + " \"allow_restricted_indices\" : true,\n" + " \"field_security\" : {\n" + " \"grant\" : [ \"title\", \"body\" ]}\n" + " }\n" + @@ -81,6 +82,7 @@ public void usedDeprecatedField(String usedName, String replacedWith) { .indices("index1", "index2") .privileges("all") .grantedFields("title", "body") + .allowRestrictedIndices(true) .build(); assertThat(role.getIndicesPrivileges().contains(expectedIndicesPrivileges), equalTo(true)); final Map expectedMetadata = new HashMap<>(); @@ -106,6 +108,7 @@ public void testEqualsHashCode() { .privileges("write", "monitor", "delete") .grantedFields("field1", "field2") .deniedFields("field3", "field4") + .allowRestrictedIndices(true) .build(); Map metadata = new HashMap<>(); metadata.put("key", "value"); @@ -125,9 +128,10 @@ public void testEqualsHashCode() { .privileges("write", "monitor", "delete") .grantedFields("other_field1", "other_field2") .deniedFields("other_field3", "other_field4") + .allowRestrictedIndices(false) .build(); Map metadata2 = new HashMap<>(); - metadata.put("other_key", "other_value"); + metadata2.put("other_key", "other_value"); final Role role2 = Role.builder() .name("role2_name") .clusterPrivileges("monitor", "manage", "manage_saml") @@ -158,6 +162,7 @@ private static GetRolesResponse mutateTestItem(GetRolesResponse original) { .privileges("write", "monitor", "delete") .grantedFields("field1", "field2") .deniedFields("field3", "field4") + .allowRestrictedIndices(true) .build(); Map metadata = new HashMap(); metadata.put("key", "value"); @@ -179,6 +184,7 @@ private static GetRolesResponse mutateTestItem(GetRolesResponse original) { .privileges("write", "monitor", "delete") .grantedFields("field1", "field2") .deniedFields("field3", "field4") + .allowRestrictedIndices(false) .build(); Map metadata = new HashMap(); metadata.put("key", "value"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetUserPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetUserPrivilegesResponseTests.java index e87ca0bf7b41b..6b310a1a6fd2d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetUserPrivilegesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetUserPrivilegesResponseTests.java @@ -48,17 +48,18 @@ public void testParse() throws Exception { " {\"application\":{\"manage\":{\"applications\":[\"apps-*\"]}}}" + "]," + "\"indices\":[" + - " {\"names\":[\"test-1-*\"],\"privileges\":[\"read\"]}," + - " {\"names\":[\"test-4-*\"],\"privileges\":[\"read\"],\"field_security\":[{\"grant\":[\"*\"],\"except\":[\"private-*\"]}]}," + - " {\"names\":[\"test-6-*\",\"test-7-*\"],\"privileges\":[\"read\"]," + + " {\"names\":[\"test-1-*\"],\"privileges\":[\"read\"],\"allow_restricted_indices\": false}," + + " 
{\"names\":[\"test-4-*\"],\"privileges\":[\"read\"],\"allow_restricted_indices\": true," + + " \"field_security\":[{\"grant\":[\"*\"],\"except\":[\"private-*\"]}]}," + + " {\"names\":[\"test-6-*\",\"test-7-*\"],\"privileges\":[\"read\"],\"allow_restricted_indices\": true," + " \"query\":[\"{\\\"term\\\":{\\\"test\\\":true}}\"]}," + - " {\"names\":[\"test-2-*\"],\"privileges\":[\"read\"]," + + " {\"names\":[\"test-2-*\"],\"privileges\":[\"read\"],\"allow_restricted_indices\": false," + " \"field_security\":[{\"grant\":[\"*\"],\"except\":[\"secret-*\",\"private-*\"]},{\"grant\":[\"apps-*\"]}]," + " \"query\":[\"{\\\"term\\\":{\\\"test\\\":true}}\",\"{\\\"term\\\":{\\\"apps\\\":true}}\"]}," + - " {\"names\":[\"test-3-*\",\"test-6-*\"],\"privileges\":[\"read\",\"write\"]}," + - " {\"names\":[\"test-3-*\",\"test-4-*\",\"test-5-*\"],\"privileges\":[\"read\"]," + + " {\"names\":[\"test-3-*\",\"test-6-*\"],\"privileges\":[\"read\",\"write\"],\"allow_restricted_indices\": true}," + + " {\"names\":[\"test-3-*\",\"test-4-*\",\"test-5-*\"],\"privileges\":[\"read\"],\"allow_restricted_indices\": false," + " \"field_security\":[{\"grant\":[\"test-*\"]}]}," + - " {\"names\":[\"test-1-*\",\"test-9-*\"],\"privileges\":[\"all\"]}" + + " {\"names\":[\"test-1-*\",\"test-9-*\"],\"privileges\":[\"all\"],\"allow_restricted_indices\": true}" + "]," + "\"applications\":[" + " {\"application\":\"app-dne\",\"privileges\":[\"all\"],\"resources\":[\"*\"]}," + @@ -80,12 +81,14 @@ public void testParse() throws Exception { assertThat(response.getIndicesPrivileges().size(), equalTo(7)); assertThat(Iterables.get(response.getIndicesPrivileges(), 0).getIndices(), contains("test-1-*")); assertThat(Iterables.get(response.getIndicesPrivileges(), 0).getPrivileges(), contains("read")); + assertThat(Iterables.get(response.getIndicesPrivileges(), 0).allowRestrictedIndices(), equalTo(false)); assertThat(Iterables.get(response.getIndicesPrivileges(), 0).getFieldSecurity(), emptyIterable()); assertThat(Iterables.get(response.getIndicesPrivileges(), 0).getQueries(), emptyIterable()); final UserIndicesPrivileges test4Privilege = Iterables.get(response.getIndicesPrivileges(), 1); assertThat(test4Privilege.getIndices(), contains("test-4-*")); assertThat(test4Privilege.getPrivileges(), contains("read")); + assertThat(test4Privilege.allowRestrictedIndices(), equalTo(true)); assertThat(test4Privilege.getFieldSecurity(), iterableWithSize(1)); final IndicesPrivileges.FieldSecurity test4FLS = test4Privilege.getFieldSecurity().iterator().next(); assertThat(test4FLS.getGrantedFields(), contains("*")); @@ -95,6 +98,7 @@ public void testParse() throws Exception { final UserIndicesPrivileges test2Privilege = Iterables.get(response.getIndicesPrivileges(), 3); assertThat(test2Privilege.getIndices(), contains("test-2-*")); assertThat(test2Privilege.getPrivileges(), contains("read")); + assertThat(test2Privilege.allowRestrictedIndices(), equalTo(false)); assertThat(test2Privilege.getFieldSecurity(), iterableWithSize(2)); final Iterator test2FLSIter = test2Privilege.getFieldSecurity().iterator(); final IndicesPrivileges.FieldSecurity test2FLS1 = test2FLSIter.next(); @@ -110,6 +114,7 @@ public void testParse() throws Exception { assertThat(Iterables.get(response.getIndicesPrivileges(), 6).getIndices(), contains("test-1-*", "test-9-*")); assertThat(Iterables.get(response.getIndicesPrivileges(), 6).getPrivileges(), contains("all")); + assertThat(Iterables.get(response.getIndicesPrivileges(), 6).allowRestrictedIndices(), equalTo(true)); 
assertThat(Iterables.get(response.getIndicesPrivileges(), 6).getFieldSecurity(), emptyIterable()); assertThat(Iterables.get(response.getIndicesPrivileges(), 6).getQueries(), emptyIterable()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java index 5a888bd95e4ab..fa1245925d7ea 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java @@ -41,8 +41,10 @@ public void testToXContent() throws IOException { final HasPrivilegesRequest request = new HasPrivilegesRequest( new LinkedHashSet<>(Arrays.asList("monitor", "manage_watcher", "manage_ml")), new LinkedHashSet<>(Arrays.asList( - IndicesPrivileges.builder().indices("index-001", "index-002").privileges("all").build(), - IndicesPrivileges.builder().indices("index-003").privileges("read").build() + IndicesPrivileges.builder().indices("index-001", "index-002").privileges("all") + .allowRestrictedIndices(true).build(), + IndicesPrivileges.builder().indices("index-003").privileges("read") + .build() )), new LinkedHashSet<>(Arrays.asList( new ApplicationResourcePrivileges("myapp", Arrays.asList("read", "write"), Arrays.asList("*")), @@ -56,10 +58,12 @@ public void testToXContent() throws IOException { " \"cluster\":[\"monitor\",\"manage_watcher\",\"manage_ml\"]," + " \"index\":[{" + " \"names\":[\"index-001\",\"index-002\"]," + - " \"privileges\":[\"all\"]" + + " \"privileges\":[\"all\"]," + + " \"allow_restricted_indices\":true" + " },{" + " \"names\":[\"index-003\"]," + - " \"privileges\":[\"read\"]" + + " \"privileges\":[\"read\"]," + + " \"allow_restricted_indices\":false" + " }]," + " \"application\":[{" + " \"application\":\"myapp\"," + @@ -81,6 +85,7 @@ public void testEqualsAndHashCode() { () -> IndicesPrivileges.builder() .indices(generateRandomStringArray(5, 12, false, false)) .privileges(generateRandomStringArray(3, 8, false, false)) + .allowRestrictedIndices(randomBoolean()) .build())); final Set application = Sets.newHashSet(randomArray(1, 5, ApplicationResourcePrivileges[]::new, () -> new ApplicationResourcePrivileges( diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/IndicesPrivilegesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/IndicesPrivilegesTests.java index 988b50e4b75b8..ec4ea851abdb5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/IndicesPrivilegesTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/IndicesPrivilegesTests.java @@ -37,6 +37,7 @@ public static IndicesPrivileges createNewRandom(String query) { final IndicesPrivileges.Builder indicesPrivilegesBuilder = IndicesPrivileges.builder() .indices(generateRandomStringArray(4, 4, false, false)) .privileges(randomSubsetOf(randomIntBetween(1, 4), Role.IndexPrivilegeName.ALL_ARRAY)) + .allowRestrictedIndices(randomBoolean()) .query(query); if (randomBoolean()) { final List fields = Arrays.asList(generateRandomStringArray(4, 4, false)); @@ -49,7 +50,8 @@ public static IndicesPrivileges createNewRandom(String query) { } public void testToXContentWithNullFieldSecurity() { - final IndicesPrivileges privileges = 
IndicesPrivileges.builder().indices("abc").privileges("all").build(); + final IndicesPrivileges privileges = IndicesPrivileges.builder().indices("abc").privileges("all") + .allowRestrictedIndices(randomBoolean()).build(); final String json = Strings.toString(privileges); assertThat(json, not(containsString("field_security"))); } @@ -60,6 +62,7 @@ public void testToXContentWithEmptyFieldSecurity() { .privileges("all") .grantedFields(Collections.emptyList()) .deniedFields(Collections.emptyList()) + .allowRestrictedIndices(randomBoolean()) .build(); final String json = Strings.toString(privileges); assertThat(json, containsString("field_security")); @@ -71,6 +74,7 @@ public void testToXContentWithDeniedFieldsOnly() { .indices("abc") .privileges("all") .deniedFields("secret.*") + .allowRestrictedIndices(randomBoolean()) .build(); final String json = Strings.toString(privileges); assertThat(json, containsString("field_security")); diff --git a/distribution/build.gradle b/distribution/build.gradle index 0e439b3586bc3..3cf4a170700df 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -337,8 +337,8 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } task run(type: RunTask) { - distribution = System.getProperty('run.distribution', 'zip') - if (distribution == 'zip') { + distribution = System.getProperty('run.distribution', 'default') + if (distribution == 'default') { String licenseType = System.getProperty("run.license_type", "basic") if (licenseType == 'trial') { setting 'xpack.ml.enabled', 'true' diff --git a/docs/build.gradle b/docs/build.gradle index 91086ce1f346d..ada204949febd 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -36,7 +36,7 @@ buildRestTests.expectedUnconvertedCandidates = [ ] integTestCluster { - if ("zip".equals(integTestCluster.distribution)) { + if ("default".equals(integTestCluster.distribution)) { setting 'xpack.license.self_generated.type', 'trial' } diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 0ccd58e0dc63c..af3274c1c09fe 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -7,20 +7,18 @@ Example: [source,js] -------------------------------------------------- -PUT /museums?include_type_name=true +PUT /museums { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "location": { + "type": "geo_point" } } } } -POST /museums/_doc/_bulk?refresh +POST /museums/_bulk?refresh {"index":{"_id":1}} {"location": "52.374081,4.912350", "city": "Amsterdam", "name": "NEMO Science Museum"} {"index":{"_id":2}} diff --git a/docs/reference/analysis/analyzers/configuring.asciidoc b/docs/reference/analysis/analyzers/configuring.asciidoc index f010f2ad6e9b1..994842508e8f5 100644 --- a/docs/reference/analysis/analyzers/configuring.asciidoc +++ b/docs/reference/analysis/analyzers/configuring.asciidoc @@ -8,7 +8,7 @@ to support a list of stop words: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -21,16 +21,14 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "my_text": { - "type": "text", - "analyzer": "standard", <2> - "fields": { - "english": { - "type": "text", - "analyzer": "std_english" <3> - } + "properties": { + "my_text": { + "type": 
"text", + "analyzer": "standard", <2> + "fields": { + "english": { + "type": "text", + "analyzer": "std_english" <3> } } } diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index adfde27138af8..7386af902fbcc 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -49,7 +49,7 @@ replace any embedded dashes in numbers with underscores, i.e `123-456-789` -> [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -100,7 +100,7 @@ camelCase words to be queried individually: [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -125,12 +125,10 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "analyzer": "my_analyzer" - } + "properties": { + "text": { + "type": "text", + "analyzer": "my_analyzer" } } } diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 14f94ff21d3dc..2e3ae8b036c3f 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -104,7 +104,7 @@ length `10`: [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -222,7 +222,7 @@ Below is an example of how to set up a field for _search-as-you-type_: [source,js] ----------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -250,13 +250,11 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "analyzer": "autocomplete", - "search_analyzer": "autocomplete_search" - } + "properties": { + "title": { + "type": "text", + "analyzer": "autocomplete", + "search_analyzer": "autocomplete_search" } } } diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index be41c0fdc77e5..96a01bbeb5d9e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -85,6 +85,9 @@ Where: `date_format`:: is the optional format in which the computed date should be rendered. Defaults to `YYYY.MM.dd`. `time_zone`:: is the optional time zone . Defaults to `utc`. +Date math expressions are resolved locale-independent. Consequently, it is not possible to use any other +calendars than the Gregorian calendar. + You must enclose date math index name expressions within angle brackets, and all special characters should be URI encoded. For example: diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index d6227b3c36202..1e2748916005c 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -125,21 +125,19 @@ First, we create an index that stores term vectors, payloads etc. 
: [source,js] -------------------------------------------------- -PUT /twitter?include_type_name=true +PUT /twitter { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets_payloads", - "store" : true, - "analyzer" : "fulltext_analyzer" - }, - "fullname": { - "type": "text", - "term_vector": "with_positions_offsets_payloads", - "analyzer" : "fulltext_analyzer" - } + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets_payloads", + "store" : true, + "analyzer" : "fulltext_analyzer" + }, + "fullname": { + "type": "text", + "term_vector": "with_positions_offsets_payloads", + "analyzer" : "fulltext_analyzer" } } }, diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index d58233a7e01cb..713e03d188ad9 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -12,15 +12,13 @@ filter on, you can safely disable indexing on this field in your [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "integer", - "index": false - } + "properties": { + "foo": { + "type": "integer", + "index": false } } } @@ -35,15 +33,13 @@ to not write norms to the index: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "norms": false - } + "properties": { + "foo": { + "type": "text", + "norms": false } } } @@ -58,15 +54,13 @@ Elasticsearch to not index positions: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "index_options": "freqs" - } + "properties": { + "foo": { + "type": "text", + "index_options": "freqs" } } } @@ -81,16 +75,14 @@ and scoring will assume that terms appear only once in every document. 
[source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "norms": false, - "index_options": "freqs" - } + "properties": { + "foo": { + "type": "text", + "norms": false, + "index_options": "freqs" } } } @@ -115,21 +107,19 @@ fields as `keyword`: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" } } - ] - } + } + ] } } -------------------------------------------------- diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index c0ec3f2a04c09..e8c213646578c 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -9,7 +9,7 @@ content indexed in two different ways: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "settings": { "analysis": { @@ -24,16 +24,14 @@ PUT index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "body": { - "type": "text", - "analyzer": "english", - "fields": { - "exact": { - "type": "text", - "analyzer": "english_exact" - } + "properties": { + "body": { + "type": "text", + "analyzer": "english", + "fields": { + "exact": { + "type": "text", + "analyzer": "english_exact" } } } diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 4ab408be935ac..b136c953b8f02 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -50,22 +50,20 @@ field. 
[source,js] -------------------------------------------------- -PUT movies?include_type_name=true +PUT movies { "mappings": { - "_doc": { - "properties": { - "name_and_plot": { - "type": "text" - }, - "name": { - "type": "text", - "copy_to": "name_and_plot" - }, - "plot": { - "type": "text", - "copy_to": "name_and_plot" - } + "properties": { + "name_and_plot": { + "type": "text" + }, + "name": { + "type": "text", + "copy_to": "name_and_plot" + }, + "plot": { + "type": "text", + "copy_to": "name_and_plot" } } } @@ -123,14 +121,12 @@ should be mapped as a <>: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "price_range": { - "type": "keyword" - } + "properties": { + "price_range": { + "type": "keyword" } } } @@ -322,15 +318,13 @@ eagerly at refresh-time by configuring mappings as described below: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "keyword", - "eager_global_ordinals": true - } + "properties": { + "foo": { + "type": "keyword", + "eager_global_ordinals": true } } } diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc index 1a0f8c65dc98b..b4648dd256d3c 100644 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -14,7 +14,7 @@ For instance the following example shows how to define a sort on a single field: [source,js] -------------------------------------------------- -PUT twitter?include_type_name=true +PUT twitter { "settings" : { "index" : { @@ -23,11 +23,9 @@ PUT twitter?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date" - } + "properties": { + "date": { + "type": "date" } } } @@ -42,7 +40,7 @@ It is also possible to sort the index by more than one field: [source,js] -------------------------------------------------- -PUT twitter?include_type_name=true +PUT twitter { "settings" : { "index" : { @@ -51,15 +49,13 @@ PUT twitter?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "username": { - "type": "keyword", - "doc_values": true - }, - "date": { - "type": "date" - } + "properties": { + "username": { + "type": "keyword", + "doc_values": true + }, + "date": { + "type": "date" } } } @@ -118,7 +114,7 @@ For example, let's say we have an index that contains events sorted by a timesta [source,js] -------------------------------------------------- -PUT events?include_type_name=true +PUT events { "settings" : { "index" : { @@ -127,11 +123,9 @@ PUT events?include_type_name=true } }, "mappings": { - "doc": { - "properties": { - "timestamp": { - "type": "date" - } + "properties": { + "timestamp": { + "type": "date" } } } diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 4d2404c3b52b0..014923d463cbd 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -20,7 +20,7 @@ settings. 
[source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings" : { "index" : { @@ -200,7 +200,7 @@ TF-IDF: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "number_of_shards": 1, @@ -214,12 +214,10 @@ PUT /index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "field": { - "type": "text", - "similarity": "scripted_tfidf" - } + "properties": { + "field": { + "type": "text", + "similarity": "scripted_tfidf" } } } @@ -369,7 +367,7 @@ more efficient: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "number_of_shards": 1, @@ -386,12 +384,10 @@ PUT /index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "field": { - "type": "text", - "similarity": "scripted_tfidf" - } + "properties": { + "field": { + "type": "text", + "similarity": "scripted_tfidf" } } } @@ -537,7 +533,7 @@ it is <>: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "index": { diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index f9f902395c4b2..f38e62806bb9d 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -196,15 +196,13 @@ You can use the following mapping for the example index above: [source,js] -------------------------------------------------- -PUT my_ip_locations?include_type_name=true +PUT my_ip_locations { "mappings": { - "_doc": { - "properties": { - "geoip": { - "properties": { - "location": { "type": "geo_point" } - } + "properties": { + "geoip": { + "properties": { + "location": { "type": "geo_point" } } } } diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc index c7a96a33473e5..735fddf3f84ab 100644 --- a/docs/reference/mapping/dynamic/field-mapping.asciidoc +++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc @@ -66,12 +66,10 @@ Dynamic date detection can be disabled by setting `date_detection` to `false`: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "date_detection": false - } + "date_detection": false } } @@ -91,12 +89,10 @@ own <>: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_date_formats": ["MM/dd/yyyy"] - } + "dynamic_date_formats": ["MM/dd/yyyy"] } } @@ -119,12 +115,10 @@ correct solution is to map these fields explicitly, but numeric detection [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "numeric_detection": true - } + "numeric_detection": true } } diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index f10a9d3475b66..8598eab412e79 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -69,35 +69,33 @@ could use the following template: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "integers": { - 
"match_mapping_type": "long", - "mapping": { - "type": "integer" - } + "dynamic_templates": [ + { + "integers": { + "match_mapping_type": "long", + "mapping": { + "type": "integer" } - }, - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "fields": { - "raw": { - "type": "keyword", - "ignore_above": 256 - } + } + }, + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "fields": { + "raw": { + "type": "keyword", + "ignore_above": 256 } } } } - ] - } + } + ] } } @@ -125,23 +123,21 @@ fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "longs_as_strings": { - "match_mapping_type": "string", - "match": "long_*", - "unmatch": "*_text", - "mapping": { - "type": "long" - } + "dynamic_templates": [ + { + "longs_as_strings": { + "match_mapping_type": "string", + "match": "long_*", + "unmatch": "*_text", + "mapping": { + "type": "long" } } - ] - } + } + ] } } @@ -181,23 +177,21 @@ top-level `full_name` field, except for the `middle` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "full_name": { - "path_match": "name.*", - "path_unmatch": "*.middle", - "mapping": { - "type": "text", - "copy_to": "full_name" - } + "dynamic_templates": [ + { + "full_name": { + "path_match": "name.*", + "path_unmatch": "*.middle", + "mapping": { + "type": "text", + "copy_to": "full_name" } } - ] - } + } + ] } } @@ -222,32 +216,30 @@ field, and disables <> for all non-string fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "named_analyzers": { - "match_mapping_type": "string", - "match": "*", - "mapping": { - "type": "text", - "analyzer": "{name}" - } + "dynamic_templates": [ + { + "named_analyzers": { + "match_mapping_type": "string", + "match": "*", + "mapping": { + "type": "text", + "analyzer": "{name}" } - }, - { - "no_doc_values": { - "match_mapping_type":"*", - "mapping": { - "type": "{dynamic_type}", - "doc_values": false - } + } + }, + { + "no_doc_values": { + "match_mapping_type":"*", + "mapping": { + "type": "{dynamic_type}", + "doc_values": false } } - ] - } + } + ] } } @@ -276,21 +268,19 @@ you will have to search on the exact same value that was indexed. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_keywords": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + "dynamic_templates": [ + { + "strings_as_keywords": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" } } - ] - } + } + ] } } -------------------------------------------------- @@ -306,21 +296,19 @@ before 5.0): [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_text": { - "match_mapping_type": "string", - "mapping": { - "type": "text" - } + "dynamic_templates": [ + { + "strings_as_text": { + "match_mapping_type": "string", + "mapping": { + "type": "text" } } - ] - } + } + ] } } -------------------------------------------------- @@ -334,28 +322,26 @@ disable the storage of these scoring factors in the index and save some space. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_keywords": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "norms": false, - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } + "dynamic_templates": [ + { + "strings_as_keywords": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "norms": false, + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 } } } } - ] - } + } + ] } } -------------------------------------------------- @@ -375,31 +361,29 @@ maybe gain some indexing speed: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "unindexed_longs": { - "match_mapping_type": "long", - "mapping": { - "type": "long", - "index": false - } + "dynamic_templates": [ + { + "unindexed_longs": { + "match_mapping_type": "long", + "mapping": { + "type": "long", + "index": false } - }, - { - "unindexed_doubles": { - "match_mapping_type": "double", - "mapping": { - "type": "float", <1> - "index": false - } + } + }, + { + "unindexed_doubles": { + "match_mapping_type": "double", + "mapping": { + "type": "float", <1> + "index": false } } - ] - } + } + ] } } -------------------------------------------------- diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index acc401cf0ec78..c455c55f5ea7f 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -21,13 +21,11 @@ execute `exists` queries using those fields you might want to disable [source,js] -------------------------------------------------- -PUT tweets?include_type_name=true +PUT tweets { "mappings": { - "_doc": { - "_field_names": { - "enabled": false - } + "_field_names": { + "enabled": false } } } diff --git a/docs/reference/mapping/fields/meta-field.asciidoc b/docs/reference/mapping/fields/meta-field.asciidoc index bdfa7d94d2c0c..b2225dba4e810 100644 --- a/docs/reference/mapping/fields/meta-field.asciidoc +++ b/docs/reference/mapping/fields/meta-field.asciidoc @@ -7,16 +7,14 @@ metadata, such as the class that a document belongs to: [source,js] -------------------------------------------------- -PUT 
my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "_meta": { <1> - "class": "MyApp::User", - "version": { - "min": "1.0", - "max": "1.3" - } + "_meta": { <1> + "class": "MyApp::User", + "version": { + "min": "1.0", + "max": "1.3" } } } diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index ca823b2430b12..6c75b91b522a9 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ b/docs/reference/mapping/fields/routing-field.asciidoc @@ -79,13 +79,11 @@ custom `routing` value required for all CRUD operations: [source,js] ------------------------------ -PUT my_index2?include_type_name=true +PUT my_index2 { "mappings": { - "_doc": { - "_routing": { - "required": true <1> - } + "_routing": { + "required": true <1> } } } diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 8f7943ebc0ff6..757fc0fa5b662 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -13,13 +13,11 @@ within the index. For this reason, it can be disabled as follows: [source,js] -------------------------------------------------- -PUT tweets?include_type_name=true +PUT tweets { "mappings": { - "_doc": { - "_source": { - "enabled": false - } + "_source": { + "enabled": false } } } @@ -85,20 +83,18 @@ as follows: [source,js] -------------------------------------------------- -PUT logs?include_type_name=true +PUT logs { "mappings": { - "_doc": { - "_source": { - "includes": [ - "*.count", - "meta.*" - ], - "excludes": [ - "meta.description", - "meta.other.*" - ] - } + "_source": { + "includes": [ + "*.count", + "meta.*" + ], + "excludes": [ + "meta.description", + "meta.other.*" + ] } } } diff --git a/docs/reference/mapping/params/analyzer.asciidoc b/docs/reference/mapping/params/analyzer.asciidoc index 43cfb7082796f..b2652bf999a77 100644 --- a/docs/reference/mapping/params/analyzer.asciidoc +++ b/docs/reference/mapping/params/analyzer.asciidoc @@ -41,18 +41,16 @@ in the field mapping, as follows: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc": { - "properties": { - "text": { <1> - "type": "text", - "fields": { - "english": { <2> - "type": "text", - "analyzer": "english" - } + "properties": { + "text": { <1> + "type": "text", + "fields": { + "english": { <2> + "type": "text", + "analyzer": "english" } } } @@ -93,7 +91,7 @@ To disable stop words for phrases a field utilising three analyzer settings will [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings":{ "analysis":{ @@ -123,14 +121,12 @@ PUT my_index?include_type_name=true } }, "mappings":{ - "_doc":{ - "properties":{ - "title": { - "type":"text", - "analyzer":"my_analyzer", <3> - "search_analyzer":"my_stop_analyzer", <4> - "search_quote_analyzer":"my_analyzer" <5> - } + "properties":{ + "title": { + "type":"text", + "analyzer":"my_analyzer", <3> + "search_analyzer":"my_stop_analyzer", <4> + "search_quote_analyzer":"my_analyzer" <5> } } } diff --git a/docs/reference/mapping/params/boost.asciidoc b/docs/reference/mapping/params/boost.asciidoc index c6f6d104c0ca0..7da03a66ac44e 100644 --- a/docs/reference/mapping/params/boost.asciidoc +++ b/docs/reference/mapping/params/boost.asciidoc @@ -6,18 +6,16 @@ Individual fields can be _boosted_ automatically -- count more 
towards the relev [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "boost": 2 <1> - }, - "content": { - "type": "text" - } + "properties": { + "title": { + "type": "text", + "boost": 2 <1> + }, + "content": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index dbb7ca84ba6fd..55f31262351fd 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -17,18 +17,16 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer" - }, - "number_two": { - "type": "integer", - "coerce": false - } + "properties": { + "number_one": { + "type": "integer" + }, + "number_two": { + "type": "integer", + "coerce": false } } } @@ -61,21 +59,19 @@ coercion globally across all mapping types: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "index.mapping.coerce": false }, "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer", - "coerce": true - }, - "number_two": { - "type": "integer" - } + "properties": { + "number_one": { + "type": "integer", + "coerce": true + }, + "number_two": { + "type": "integer" } } } diff --git a/docs/reference/mapping/params/copy-to.asciidoc b/docs/reference/mapping/params/copy-to.asciidoc index d56258aa73324..1796b31360aed 100644 --- a/docs/reference/mapping/params/copy-to.asciidoc +++ b/docs/reference/mapping/params/copy-to.asciidoc @@ -8,22 +8,20 @@ the `full_name` field as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "first_name": { - "type": "text", - "copy_to": "full_name" <1> - }, - "last_name": { - "type": "text", - "copy_to": "full_name" <1> - }, - "full_name": { - "type": "text" - } + "properties": { + "first_name": { + "type": "text", + "copy_to": "full_name" <1> + }, + "last_name": { + "type": "text", + "copy_to": "full_name" <1> + }, + "full_name": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/doc-values.asciidoc b/docs/reference/mapping/params/doc-values.asciidoc index 74efc1bbbfabd..5680cdabba884 100644 --- a/docs/reference/mapping/params/doc-values.asciidoc +++ b/docs/reference/mapping/params/doc-values.asciidoc @@ -23,18 +23,16 @@ value from a script, you can disable doc values in order to save disk space: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "status_code": { <1> - "type": "keyword" - }, - "session_id": { <2> - "type": "keyword", - "doc_values": false - } + "properties": { + "status_code": { <1> + "type": "keyword" + }, + "session_id": { <2> + "type": "keyword", + "doc_values": false } } } diff --git a/docs/reference/mapping/params/dynamic.asciidoc b/docs/reference/mapping/params/dynamic.asciidoc index f7159724f48f4..62d61f5f095a6 100644 --- a/docs/reference/mapping/params/dynamic.asciidoc +++ b/docs/reference/mapping/params/dynamic.asciidoc @@ -58,21 +58,19 @@ object or from the mapping type. 
For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic": false, <1> - "properties": { - "user": { <2> - "properties": { - "name": { - "type": "text" - }, - "social_networks": { <3> - "dynamic": true, - "properties": {} - } + "dynamic": false, <1> + "properties": { + "user": { <2> + "properties": { + "name": { + "type": "text" + }, + "social_networks": { <3> + "dynamic": true, + "properties": {} } } } diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index fbcd25c716bbf..06b76ddeae006 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -15,20 +15,18 @@ in any other way: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "user_id": { - "type": "keyword" - }, - "last_updated": { - "type": "date" - }, - "session_data": { <1> - "enabled": false - } + "properties": { + "user_id": { + "type": "keyword" + }, + "last_updated": { + "type": "date" + }, + "session_data": { <1> + "enabled": false } } } @@ -63,12 +61,10 @@ retrieved, but none of its contents are indexed in any way: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "enabled": false - } + "enabled": false <1> } } @@ -88,7 +84,7 @@ GET my_index/_doc/session_1 <2> GET my_index/_mapping <3> -------------------------------------------------- // CONSOLE -<1> The entire `_doc` mapping type is disabled. +<1> The entire mapping type is disabled. <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. 
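For reference, the full round trip for the rewritten `enabled` example above can be sketched in the new typeless form as follows (a sketch only; the document ID and field value are illustrative):

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "enabled": false
  }
}

PUT my_index/_doc/session_1
{
  "user_id": "kimchy"
}

GET my_index/_doc/session_1

GET my_index/_mapping
--------------------------------------------------

The document is retrievable from `_source`, while the mapping response shows that no fields were added, matching the callouts above.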
diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc index eee7463c6400c..42f02b7ee28ea 100644 --- a/docs/reference/mapping/params/fielddata.asciidoc +++ b/docs/reference/mapping/params/fielddata.asciidoc @@ -55,17 +55,15 @@ enabled for aggregations, as follows: [source,js] --------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_field": { <1> - "type": "text", - "fields": { - "keyword": { <2> - "type": "keyword" - } + "properties": { + "my_field": { <1> + "type": "text", + "fields": { + "keyword": { <2> + "type": "keyword" } } } @@ -118,19 +116,17 @@ number of docs that the segment should contain with `min_segment_size`: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "tag": { - "type": "text", - "fielddata": true, - "fielddata_frequency_filter": { - "min": 0.001, - "max": 0.1, - "min_segment_size": 500 - } + "properties": { + "tag": { + "type": "text", + "fielddata": true, + "fielddata_frequency_filter": { + "min": 0.001, + "max": 0.1, + "min_segment_size": 500 } } } diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 641433685d3a2..2be1bdf12d891 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -11,15 +11,13 @@ Besides the <>, your own [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd" - } + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd" } } } diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index 19c275dac5d01..daf5c92bcf34d 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -8,15 +8,13 @@ NOTE: All strings/array elements will still be present in the `_source` field, i [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "message": { - "type": "keyword", - "ignore_above": 20 <1> - } + "properties": { + "message": { + "type": "keyword", + "ignore_above": 20 <1> } } } diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 84a53515d9ebe..30aa6c4e8bc0d 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -14,18 +14,16 @@ For example: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer", - "ignore_malformed": true - }, - "number_two": { - "type": "integer" - } + "properties": { + "number_one": { + "type": "integer", + "ignore_malformed": true + }, + "number_two": { + "type": "integer" } } } @@ -61,21 +59,19 @@ allow to ignore malformed content globally across all mapping types. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "index.mapping.ignore_malformed": true <1> }, "mappings": { - "_doc": { - "properties": { - "number_one": { <1> - "type": "byte" - }, - "number_two": { - "type": "integer", - "ignore_malformed": false <2> - } + "properties": { + "number_one": { <1> + "type": "byte" + }, + "number_two": { + "type": "integer", + "ignore_malformed": false <2> } } } diff --git a/docs/reference/mapping/params/index-options.asciidoc b/docs/reference/mapping/params/index-options.asciidoc index cda680399dde2..527050a87b29b 100644 --- a/docs/reference/mapping/params/index-options.asciidoc +++ b/docs/reference/mapping/params/index-options.asciidoc @@ -35,15 +35,13 @@ all other fields use `docs` as the default. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "index_options": "offsets" - } + "properties": { + "text": { + "type": "text", + "index_options": "offsets" } } } diff --git a/docs/reference/mapping/params/index-prefixes.asciidoc b/docs/reference/mapping/params/index-prefixes.asciidoc index baca8606f5c03..841ccdde08776 100644 --- a/docs/reference/mapping/params/index-prefixes.asciidoc +++ b/docs/reference/mapping/params/index-prefixes.asciidoc @@ -19,15 +19,13 @@ This example creates a text field using the default prefix length settings: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "body_text": { - "type": "text", - "index_prefixes": { } <1> - } + "properties": { + "body_text": { + "type": "text", + "index_prefixes": { } <1> } } } @@ -42,17 +40,15 @@ This example uses custom prefix length settings: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_name": { - "type": "text", - "index_prefixes": { - "min_chars" : 1, - "max_chars" : 10 - } + "properties": { + "full_name": { + "type": "text", + "index_prefixes": { + "min_chars" : 1, + "max_chars" : 10 } } } diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index e4bdb04506d92..ee1bc02c7fd8d 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -8,17 +8,15 @@ search, and as a `keyword` field for sorting or aggregations: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "city": { - "type": "text", - "fields": { - "raw": { <1> - "type": "keyword" - } + "properties": { + "city": { + "type": "text", + "fields": { + "raw": { <1> + "type": "keyword" } } } @@ -76,18 +74,16 @@ which stems words into their root form: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { <1> - "type": "text", - "fields": { - "english": { <2> - "type": "text", - "analyzer": "english" - } + "properties": { + "text": { <1> + "type": "text", + "fields": { + "english": { <2> + "type": "text", + "analyzer": "english" } } } diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 79ba39e194726..da0298abda228 
100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -12,7 +12,7 @@ such as the <> query. [source,js] -------------------------------- -PUT index?include_type_name=true +PUT index { "settings": { "analysis": { @@ -26,12 +26,10 @@ PUT index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "keyword", - "normalizer": "my_normalizer" - } + "properties": { + "foo": { + "type": "keyword", + "normalizer": "my_normalizer" } } } diff --git a/docs/reference/mapping/params/null-value.asciidoc b/docs/reference/mapping/params/null-value.asciidoc index b2f46a2c7cef3..0a618ddcac9bc 100644 --- a/docs/reference/mapping/params/null-value.asciidoc +++ b/docs/reference/mapping/params/null-value.asciidoc @@ -10,15 +10,13 @@ the specified value so that it can be indexed and searched. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "status_code": { - "type": "keyword", - "null_value": "NULL" <1> - } + "properties": { + "status_code": { + "type": "keyword", + "null_value": "NULL" <1> } } } diff --git a/docs/reference/mapping/params/position-increment-gap.asciidoc b/docs/reference/mapping/params/position-increment-gap.asciidoc index ae6cc85865709..853f98ade75b8 100644 --- a/docs/reference/mapping/params/position-increment-gap.asciidoc +++ b/docs/reference/mapping/params/position-increment-gap.asciidoc @@ -51,15 +51,13 @@ The `position_increment_gap` can be specified in the mapping. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "names": { - "type": "text", - "position_increment_gap": 0 <1> - } + "properties": { + "names": { + "type": "text", + "position_increment_gap": 0 <1> } } } diff --git a/docs/reference/mapping/params/properties.asciidoc b/docs/reference/mapping/params/properties.asciidoc index 2efef0abf3cdc..9837dd3d5d9f4 100644 --- a/docs/reference/mapping/params/properties.asciidoc +++ b/docs/reference/mapping/params/properties.asciidoc @@ -15,23 +15,21 @@ field, and a `nested` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "properties": { - "manager": { <2> - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } - }, - "employees": { <3> - "type": "nested", - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } + "properties": { <1> + "manager": { + "properties": { <2> + "age": { "type": "integer" }, + "name": { "type": "text" } + } + }, + "employees": { + "type": "nested", + "properties": { <3> + "age": { "type": "integer" }, + "name": { "type": "text" } } } } @@ -58,7 +56,7 @@ PUT my_index/_doc/1 <4> } -------------------------------------------------- // CONSOLE -<1> Properties under the `_doc` mapping type. +<1> Properties in the top-level mappings definition. <2> Properties under the `manager` object field. <3> Properties under the `employees` nested field. <4> An example document which corresponds to the above mapping. 
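As a usage sketch for the rewritten `properties` example above, inner fields are still referenced with dot notation in queries, independent of the removed `_doc` wrapper (the field value is illustrative):

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "match": {
      "manager.name": "Alice White"
    }
  }
}
--------------------------------------------------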
diff --git a/docs/reference/mapping/params/search-analyzer.asciidoc b/docs/reference/mapping/params/search-analyzer.asciidoc index eb483a3af384f..9b142f58c5960 100644 --- a/docs/reference/mapping/params/search-analyzer.asciidoc +++ b/docs/reference/mapping/params/search-analyzer.asciidoc @@ -14,7 +14,7 @@ this can be overridden with the `search_analyzer` setting: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -38,13 +38,11 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "analyzer": "autocomplete", <2> - "search_analyzer": "standard" <2> - } + "properties": { + "text": { + "type": "text", + "analyzer": "autocomplete", <2> + "search_analyzer": "standard" <2> } } } diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 9e6e4e0877830..8085adf9110de 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -36,18 +36,16 @@ as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "default_field": { <1> - "type": "text" - }, - "boolean_sim_field": { - "type": "text", - "similarity": "boolean" <2> - } + "properties": { + "default_field": { <1> + "type": "text" + }, + "boolean_sim_field": { + "type": "text", + "similarity": "boolean" <2> } } } diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index 186666ef5dd73..d3ebe13d4ad62 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -18,22 +18,20 @@ to extract those fields from a large `_source` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "store": true <1> - }, - "date": { - "type": "date", - "store": true <1> - }, - "content": { - "type": "text" - } + "properties": { + "title": { + "type": "text", + "store": true <1> + }, + "date": { + "type": "date", + "store": true <1> + }, + "content": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/term-vector.asciidoc b/docs/reference/mapping/params/term-vector.asciidoc index 491a002f3d612..ff05539522efc 100644 --- a/docs/reference/mapping/params/term-vector.asciidoc +++ b/docs/reference/mapping/params/term-vector.asciidoc @@ -29,15 +29,13 @@ index. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets" - } + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets" } } } diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index 75a2b65e5c110..318124d71333e 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -7,21 +7,19 @@ and selected other APIs like <>. 
[source,js] -------------------------------- -PUT trips?include_type_name=true +PUT trips { "mappings": { - "_doc": { - "properties": { - "distance": { - "type": "long" - }, - "route_length_miles": { - "type": "alias", - "path": "distance" // <1> - }, - "transit_mode": { - "type": "keyword" - } + "properties": { + "distance": { + "type": "long" + }, + "route_length_miles": { + "type": "alias", + "path": "distance" // <1> + }, + "transit_mode": { + "type": "keyword" } } } diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index ebd50802b8d84..22e107dab565d 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -7,17 +7,15 @@ stored by default and is not searchable: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "name": { - "type": "text" - }, - "blob": { - "type": "binary" - } + "properties": { + "name": { + "type": "text" + }, + "blob": { + "type": "binary" } } } diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 3a92ae23e2fd9..962022060b65b 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -17,14 +17,12 @@ For example: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "is_published": { - "type": "boolean" - } + "properties": { + "is_published": { + "type": "boolean" } } } diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index 03024d1383945..94aadb46fb2b6 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -30,14 +30,12 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date" <1> - } + "properties": { + "date": { + "type": "date" <1> } } } @@ -74,15 +72,13 @@ into a string. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index b12e9e120a479..b97566361a05c 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -17,17 +17,15 @@ You index a dense vector as an array of floats. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_vector": { - "type": "dense_vector" - }, - "my_text" : { - "type" : "keyword" - } + "properties": { + "my_vector": { + "type": "dense_vector" + }, + "my_text" : { + "type" : "keyword" } } } diff --git a/docs/reference/mapping/types/feature-vector.asciidoc b/docs/reference/mapping/types/feature-vector.asciidoc index 25358ab9ec3a6..b4701fc9ab7dd 100644 --- a/docs/reference/mapping/types/feature-vector.asciidoc +++ b/docs/reference/mapping/types/feature-vector.asciidoc @@ -11,14 +11,12 @@ one field to the mappings for each of them. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "topics": { - "type": "feature_vector" <1> - } + "properties": { + "topics": { + "type": "feature_vector" <1> } } } diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc index 76eada86c59e5..7fe8ff6f935af 100644 --- a/docs/reference/mapping/types/feature.asciidoc +++ b/docs/reference/mapping/types/feature.asciidoc @@ -6,18 +6,16 @@ documents in queries with a <> query. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "pagerank": { - "type": "feature" <1> - }, - "url_length": { - "type": "feature", - "positive_score_impact": false <2> - } + "properties": { + "pagerank": { + "type": "feature" <1> + }, + "url_length": { + "type": "feature", + "positive_score_impact": false <2> } } } diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 752e611db811f..51e137fbc33b6 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -15,14 +15,12 @@ There are four ways that a geo-point may be specified, as demonstrated below: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 6206df61ac3b6..7f3f5f57d7077 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -6,14 +6,12 @@ https://en.wikipedia.org/wiki/IPv6[IPv6] addresses. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "ip_addr": { - "type": "ip" - } + "properties": { + "ip_addr": { + "type": "ip" } } } diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index e9e913be3a6d0..8ac0983dc9550 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -15,14 +15,12 @@ Below is an example of a mapping for a keyword field: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "tags": { - "type": "keyword" - } + "properties": { + "tags": { + "type": "keyword" } } } diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 4681897575189..f420e680c8590 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -75,14 +75,12 @@ queried independently of the others, with the < - } + "properties": { + "user": { + "type": "nested" <1> } } } diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index a279f9ca24e6a..f2977957ff463 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -17,21 +17,19 @@ Below is an example of configuring a mapping with numeric fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_of_bytes": { - "type": "integer" - }, - "time_in_seconds": { - "type": "float" - }, - "price": { - "type": "scaled_float", - "scaling_factor": 100 - } + "properties": { + "number_of_bytes": { + "type": "integer" + }, + "time_in_seconds": { + "type": "float" + }, + "price": { + "type": "scaled_float", + "scaling_factor": 100 } } } diff --git a/docs/reference/mapping/types/object.asciidoc b/docs/reference/mapping/types/object.asciidoc index 1bf9afa3c3277..f5b9a9df85617 100644 --- a/docs/reference/mapping/types/object.asciidoc +++ b/docs/reference/mapping/types/object.asciidoc @@ -41,22 +41,20 @@ An explicit mapping for the above document could look like this: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "properties": { - "region": { - "type": "keyword" - }, - "manager": { <2> - "properties": { - "age": { "type": "integer" }, - "name": { <3> - "properties": { - "first": { "type": "text" }, - "last": { "type": "text" } - } + "properties": { <1> + "region": { + "type": "keyword" + }, + "manager": { <2> + "properties": { + "age": { "type": "integer" }, + "name": { <3> + "properties": { + "first": { "type": "text" }, + "last": { "type": "text" } } } } @@ -66,7 +64,7 @@ PUT my_index?include_type_name=true } -------------------------------------------------- // CONSOLE -<1> The mapping type is a type of object, and has a `properties` field. +<1> Properties in the top-level mappings definition. <2> The `manager` field is an inner `object` field. <3> The `manager.name` field is an inner `object` field within the `manager` field. 
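A document matching the rewritten explicit mapping above could be indexed as follows (a sketch; field values are illustrative):

[source,js]
--------------------------------------------------
PUT my_index/_doc/1
{
  "region": "US",
  "manager": {
    "age": 30,
    "name": {
      "first": "John",
      "last": "Smith"
    }
  }
}
--------------------------------------------------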
diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index dacef7c4bc7cb..39bcaa96d7764 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -9,16 +9,14 @@ A parent/child relation can be defined as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { <1> - "type": "join", - "relations": { - "question": "answer" <2> - } + "properties": { + "my_join_field": { <1> + "type": "join", + "relations": { + "question": "answer" <2> } } } @@ -319,18 +317,16 @@ make sense to disable eager loading: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": "answer" - }, - "eager_global_ordinals": false - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": "answer" + }, + "eager_global_ordinals": false } } } @@ -358,16 +354,14 @@ It is also possible to define multiple children for a single parent: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": ["answer", "comment"] <1> - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": ["answer", "comment"] <1> } } } @@ -388,17 +382,15 @@ Multiple levels of parent/child: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": ["answer", "comment"], <1> - "answer": "vote" <2> - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": ["answer", "comment"], <1> + "answer": "vote" <2> } } } diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 7324826eb44ef..0096746d2df35 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -15,17 +15,15 @@ If the following mapping configures the `percolator` field type for the [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "field": { - "type": "text" - } + "properties": { + "query": { + "type": "percolator" + }, + "field": { + "type": "text" } } } @@ -69,17 +67,15 @@ Lets take a look at the following index with a percolator field type: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc" : { - "properties": { - "query" : { - "type" : "percolator" - }, - "body" : { - "type": "text" - } + "properties": { + "query" : { + "type" : "percolator" + }, + "body" : { + "type": "text" } } } @@ -117,17 +113,15 @@ to read your queries you need to reindex your queries into a new index on the cu [source,js] -------------------------------------------------- -PUT new_index?include_type_name=true +PUT new_index { "mappings": { - "_doc" : { - "properties": { - "query" : { - "type" : "percolator" - }, - 
"body" : { - "type": "text" - } + "properties": { + "query" : { + "type" : "percolator" + }, + "body" : { + "type": "text" } } } @@ -269,7 +263,7 @@ with these settings and mapping: [source,js] -------------------------------------------------- -PUT /test_index?include_type_name=true +PUT /test_index { "settings": { "analysis": { @@ -282,15 +276,13 @@ PUT /test_index?include_type_name=true } }, "mappings": { - "_doc" : { - "properties": { - "query" : { - "type": "percolator" - }, - "body" : { - "type": "text", - "analyzer": "my_analyzer" <1> - } + "properties": { + "query" : { + "type": "percolator" + }, + "body" : { + "type": "text", + "analyzer": "my_analyzer" <1> } } } @@ -442,7 +434,7 @@ Creating an index with custom analysis settings: [source,js] -------------------------------------------------- -PUT my_queries1?include_type_name=true +PUT my_queries1 { "settings": { "analysis": { @@ -466,19 +458,17 @@ PUT my_queries1?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "my_field": { - "type": "text", - "fields": { - "prefix": { <3> - "type": "text", - "analyzer": "wildcard_prefix", - "search_analyzer": "standard" - } + "properties": { + "query": { + "type": "percolator" + }, + "my_field": { + "type": "text", + "fields": { + "prefix": { <3> + "type": "text", + "analyzer": "wildcard_prefix", + "search_analyzer": "standard" } } } @@ -595,7 +585,7 @@ before the `edge_ngram` token filter. [source,js] -------------------------------------------------- -PUT my_queries2?include_type_name=true +PUT my_queries2 { "settings": { "analysis": { @@ -628,19 +618,17 @@ PUT my_queries2?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "my_field": { - "type": "text", - "fields": { - "suffix": { - "type": "text", - "analyzer": "wildcard_suffix", - "search_analyzer": "wildcard_suffix_search_time" <1> - } + "properties": { + "query": { + "type": "percolator" + }, + "my_field": { + "type": "text", + "fields": { + "suffix": { + "type": "text", + "analyzer": "wildcard_suffix", + "search_analyzer": "wildcard_suffix_search_time" <1> } } } diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 630458b4866e5..79c9e6629c696 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -16,21 +16,19 @@ Below is an example of configuring a mapping with various range fields followed [source,js] -------------------------------------------------- -PUT range_index?include_type_name=true +PUT range_index { "settings": { "number_of_shards": 2 }, "mappings": { - "_doc": { - "properties": { - "expected_attendees": { - "type": "integer_range" - }, - "time_frame": { - "type": "date_range", <1> - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } + "properties": { + "expected_attendees": { + "type": "integer_range" + }, + "time_frame": { + "type": "date_range", <1> + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index df374e857ba6a..38561789b5d3f 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -20,17 +20,15 @@ Dimensions don't need to be in order. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_vector": { - "type": "sparse_vector" - }, - "my_text" : { - "type" : "keyword" - } + "properties": { + "my_vector": { + "type": "sparse_vector" + }, + "my_text" : { + "type" : "keyword" } } } diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 1b3c0a0eaf112..ee972918988ad 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -17,14 +17,12 @@ Below is an example of a mapping for a text field: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_name": { - "type": "text" - } + "properties": { + "full_name": { + "type": "text" } } } diff --git a/docs/reference/mapping/types/token-count.asciidoc b/docs/reference/mapping/types/token-count.asciidoc index 469583abf1bcd..d574c25e93d19 100644 --- a/docs/reference/mapping/types/token-count.asciidoc +++ b/docs/reference/mapping/types/token-count.asciidoc @@ -9,18 +9,16 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "name": { <1> - "type": "text", - "fields": { - "length": { <2> - "type": "token_count", - "analyzer": "standard" - } + "properties": { + "name": { <1> + "type": "text", + "fields": { + "length": { <2> + "type": "token_count", + "analyzer": "standard" } } } diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index d2c935ba6d32d..18c611e97cac1 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -24,6 +24,8 @@ You must create a job before you create a {dfeed}. You can associate only one `feed_id` (required):: (string) A numerical character string that uniquely identifies the {dfeed}. + This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + hyphens, and underscores. It must start and end with alphanumeric characters. 
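For example, `datafeed-total-requests` satisfies these identifier rules; a minimal creation request might look like the following sketch (the job and index names are illustrative):

[source,js]
--------------------------------------------------
PUT _ml/datafeeds/datafeed-total-requests
{
  "job_id": "total-requests",
  "indices": [ "server-metrics" ]
}
--------------------------------------------------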
==== Request Body diff --git a/docs/reference/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc index 02dd4d5469cfd..a3e7df9fdf27a 100644 --- a/docs/reference/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -26,51 +26,49 @@ subsequent examples: [source,js] ---------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings":{ - "_doc":{ - "properties": { - "@timestamp": { - "type": "date" - }, - "aborted_count": { - "type": "long" - }, - "another_field": { - "type": "keyword" <1> - }, - "clientip": { - "type": "keyword" - }, - "coords": { - "properties": { - "lat": { - "type": "keyword" - }, - "lon": { - "type": "keyword" - } + "properties": { + "@timestamp": { + "type": "date" + }, + "aborted_count": { + "type": "long" + }, + "another_field": { + "type": "keyword" <1> + }, + "clientip": { + "type": "keyword" + }, + "coords": { + "properties": { + "lat": { + "type": "keyword" + }, + "lon": { + "type": "keyword" } - }, - "error_count": { - "type": "long" - }, - "query": { - "type": "keyword" - }, - "some_field": { - "type": "keyword" - }, - "tokenstring1":{ - "type":"keyword" - }, - "tokenstring2":{ - "type":"keyword" - }, - "tokenstring3":{ - "type":"keyword" } + }, + "error_count": { + "type": "long" + }, + "query": { + "type": "keyword" + }, + "some_field": { + "type": "keyword" + }, + "tokenstring1":{ + "type":"keyword" + }, + "tokenstring2":{ + "type":"keyword" + }, + "tokenstring3":{ + "type":"keyword" } } } diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 16828c3751a39..b2e27c76e494d 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -52,15 +52,13 @@ instance, if the `user` field were mapped as follows: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "_doc": { - "properties": { - "user": { - "type": "keyword", - "null_value": "_null_" - } + "properties": { + "user": { + "type": "keyword", + "null_value": "_null_" } } } diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc index c261133cff412..353c135bf8efd 100644 --- a/docs/reference/query-dsl/feature-query.asciidoc +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -27,21 +27,19 @@ based or `pagerank`, `url_length` and the `sports` topic. [source,js] -------------------------------------------------- -PUT test?include_type_name=true +PUT test { "mappings": { - "_doc": { - "properties": { - "pagerank": { - "type": "feature" - }, - "url_length": { - "type": "feature", - "positive_score_impact": false - }, - "topics": { - "type": "feature_vector" - } + "properties": { + "pagerank": { + "type": "feature" + }, + "url_length": { + "type": "feature", + "positive_score_impact": false + }, + "topics": { + "type": "feature_vector" } } } diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 8c2b25ea0ee7a..487e944c09e10 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -6,16 +6,14 @@ bounding box. 
Assuming the following indexed document: [source,js] -------------------------------------------------- -PUT /my_locations?include_type_name=true +PUT /my_locations { "mappings": { - "_doc": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index da1f99f834d67..da7b0ecfd81e5 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -7,16 +7,14 @@ document: [source,js] -------------------------------------------------- -PUT /my_locations?include_type_name=true +PUT /my_locations { "mappings": { - "_doc": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 4121ebfc45edd..424968090d6ab 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -24,14 +24,12 @@ Given the following index: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } @@ -99,14 +97,12 @@ shape: [source,js] -------------------------------------------------- -PUT /shapes?include_type_name=true +PUT /shapes { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index f75348ca976ec..c58d68b73cff1 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -10,14 +10,12 @@ will work with: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc" : { - "properties" : { - "obj1" : { - "type" : "nested" - } + "properties" : { + "obj1" : { + "type" : "nested" } } } diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index ff306238ddea0..aa2074e7d1b7e 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -6,16 +6,14 @@ Given the following mapping definition: [source,js] -------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "my_parent": "my_child" - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": "my_child" } } } diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 9ae6dd802137c..89264af0f2619 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -13,18 +13,16 @@ Create an index with two fields: [source,js] -------------------------------------------------- -PUT /my-index?include_type_name=true +PUT /my-index { 
"mappings": { - "_doc": { - "properties": { - "message": { - "type": "text" - }, - "query": { - "type": "percolator" - } - } + "properties": { + "message": { + "type": "text" + }, + "query": { + "type": "percolator" + } } } } diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 26dd6ee492f21..910123bbe6177 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -87,17 +87,15 @@ To demonstrate, try out the example below. First, create an index, specifying t [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_text": { - "type": "text" <1> - }, - "exact_value": { - "type": "keyword" <2> - } + "properties": { + "full_text": { + "type": "text" <1> + }, + "exact_value": { + "type": "keyword" <2> } } } diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 5c0e00ce360fa..3ebfb672e205f 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -12,14 +12,12 @@ be a number field: [source,js] -------------------------------------------------- -PUT /my-index?include_type_name=true +PUT /my-index { "mappings": { - "_doc": { - "properties": { - "required_matches": { - "type": "long" - } + "properties": { + "required_matches": { + "type": "long" } } } diff --git a/docs/reference/search/request/highlighters-internal.asciidoc b/docs/reference/search/request/highlighters-internal.asciidoc index aa3377bdccf61..11534a01aa234 100644 --- a/docs/reference/search/request/highlighters-internal.asciidoc +++ b/docs/reference/search/request/highlighters-internal.asciidoc @@ -87,15 +87,13 @@ using `english` analyzer, and will be indexed without offsets or term vectors. 
[source,js] -------------------------------------------------- -PUT test_index?include_type_name=true +PUT test_index { "mappings": { - "_doc": { - "properties": { - "content" : { - "type" : "text", - "analyzer" : "english" - } + "properties": { + "content" : { + "type" : "text", + "analyzer" : "english" } } } diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index b404a30a8250a..c46cdb1e52286 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -9,15 +9,13 @@ Imagine that you are selling shirts that have the following properties: [source,js] -------------------------------------------------- -PUT /shirts?include_type_name=true +PUT /shirts { "mappings": { - "_doc": { - "properties": { - "brand": { "type": "keyword"}, - "color": { "type": "keyword"}, - "model": { "type": "keyword"} - } + "properties": { + "brand": { "type": "keyword"}, + "color": { "type": "keyword"}, + "model": { "type": "keyword"} } } } diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index fe7cf362ea36f..bd8c0d1ad5c27 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -9,20 +9,18 @@ Assuming the following index mapping: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc": { - "properties": { - "post_date": { "type": "date" }, - "user": { - "type": "keyword" - }, - "name": { - "type": "keyword" - }, - "age": { "type": "integer" } - } + "properties": { + "post_date": { "type": "date" }, + "user": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "age": { "type": "integer" } } } } diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 191f01f17394d..c0b527c06e550 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -24,17 +24,15 @@ which indexes the field values for fast completions. 
[source,js] -------------------------------------------------- -PUT music?include_type_name=true +PUT music { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion" - }, - "title" : { - "type": "keyword" - } + "properties" : { + "suggest" : { + "type" : "completion" + }, + "title" : { + "type": "keyword" } } } diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 1ef8968188ccd..63692f0b06f82 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -21,53 +21,49 @@ field: [source,js] -------------------------------------------------- -PUT place?include_type_name=true +PUT place { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion", - "contexts": [ - { <1> - "name": "place_type", - "type": "category" - }, - { <2> - "name": "location", - "type": "geo", - "precision": 4 - } - ] - } + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <1> + "name": "place_type", + "type": "category" + }, + { <2> + "name": "location", + "type": "geo", + "precision": 4 + } + ] } } } } -PUT place_path_category?include_type_name=true +PUT place_path_category { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion", - "contexts": [ - { <3> - "name": "place_type", - "type": "category", - "path": "cat" - }, - { <4> - "name": "location", - "type": "geo", - "precision": 4, - "path": "loc" - } - ] - }, - "loc": { - "type": "geo_point" - } + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <3> + "name": "place_type", + "type": "category", + "path": "cat" + }, + { <4> + "name": "location", + "type": "geo", + "precision": 4, + "path": "loc" + } + ] + }, + "loc": { + "type": "geo_point" } } } diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 08a7aabd1c5de..9c2c56cc40fec 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -23,7 +23,7 @@ work. The `reverse` analyzer is used only in the last example. 
[source,js] -------------------------------------------------- -PUT test?include_type_name=true +PUT test { "settings": { "index": { @@ -52,19 +52,17 @@ PUT test?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "fields": { - "trigram": { - "type": "text", - "analyzer": "trigram" - }, - "reverse": { - "type": "text", - "analyzer": "reverse" - } + "properties": { + "title": { + "type": "text", + "fields": { + "trigram": { + "type": "text", + "analyzer": "trigram" + }, + "reverse": { + "type": "text", + "analyzer": "reverse" } } } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java index 87c0ff2817eb7..2d57faf5cb897 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java @@ -34,14 +34,17 @@ public WriteOperation createWriteOperation(SocketChannelContext context, Object return new FlushReadyWrite(context, (ByteBuffer[]) message, listener); } + @Override public List writeToBytes(WriteOperation writeOperation) { assert writeOperation instanceof FlushReadyWrite : "Write operation must be flush ready"; return Collections.singletonList((FlushReadyWrite) writeOperation); } + @Override public List pollFlushOperations() { return EMPTY_LIST; } + @Override public void close() {} } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java index fe1bc1cf4043e..abb46cf3aea03 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java @@ -19,161 +19,26 @@ package org.elasticsearch.nio; -import org.elasticsearch.nio.utils.ExceptionsHelper; - +import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; /** - * The NioGroup is a group of selectors for interfacing with java nio. When it is started it will create the - * configured number of selectors. Each selector will be running in a dedicated thread. Server connections - * can be bound using the {@link #bindServerChannel(InetSocketAddress, ChannelFactory)} method. Client - * connections can be opened using the {@link #openChannel(InetSocketAddress, ChannelFactory)} method. - *

- * The logic specific to a particular channel is provided by the {@link ChannelFactory} passed to the method - * when the channel is created. This is what allows an NioGroup to support different channel types. + * An interface for interacting with java.nio. Implementations provide the underlying logic for opening + * channels and registering them with the OS. */ -public class NioGroup implements AutoCloseable { - - - private final List dedicatedAcceptors; - private final RoundRobinSupplier acceptorSupplier; - - private final List selectors; - private final RoundRobinSupplier selectorSupplier; - - private final AtomicBoolean isOpen = new AtomicBoolean(true); +public interface NioGroup extends Closeable { /** - * This will create an NioGroup with no dedicated acceptors. All server channels will be handled by the - * same selectors that are handling child channels. - * - * @param threadFactory factory to create selector threads - * @param selectorCount the number of selectors to be created - * @param eventHandlerFunction function for creating event handlers - * @throws IOException occurs if there is a problem while opening a java.nio.Selector + * Opens and binds a server channel to accept incoming connections. */ - public NioGroup(ThreadFactory threadFactory, int selectorCount, Function, EventHandler> eventHandlerFunction) - throws IOException { - this(null, 0, threadFactory, selectorCount, eventHandlerFunction); - } + S bindServerChannel(InetSocketAddress address, ChannelFactory factory) throws IOException; /** - * This will create an NioGroup with dedicated acceptors. All server channels will be handled by a group - * of selectors dedicated to accepting channels. These accepted channels will be handed off the - * non-server selectors. - * - * @param acceptorThreadFactory factory to create acceptor selector threads - * @param dedicatedAcceptorCount the number of dedicated acceptor selectors to be created - * @param selectorThreadFactory factory to create non-acceptor selector threads - * @param selectorCount the number of non-acceptor selectors to be created - * @param eventHandlerFunction function for creating event handlers - * @throws IOException occurs if there is a problem while opening a java.nio.Selector + * Opens an outgoing client channel.
*/ - public NioGroup(ThreadFactory acceptorThreadFactory, int dedicatedAcceptorCount, ThreadFactory selectorThreadFactory, int selectorCount, - Function, EventHandler> eventHandlerFunction) throws IOException { - dedicatedAcceptors = new ArrayList<>(dedicatedAcceptorCount); - selectors = new ArrayList<>(selectorCount); - - try { - List> suppliersToSet = new ArrayList<>(selectorCount); - for (int i = 0; i < selectorCount; ++i) { - RoundRobinSupplier supplier = new RoundRobinSupplier<>(); - suppliersToSet.add(supplier); - NioSelector selector = new NioSelector(eventHandlerFunction.apply(supplier)); - selectors.add(selector); - } - for (RoundRobinSupplier supplierToSet : suppliersToSet) { - supplierToSet.setSelectors(selectors.toArray(new NioSelector[0])); - assert supplierToSet.count() == selectors.size() : "Supplier should have same count as selector list."; - } - - for (int i = 0; i < dedicatedAcceptorCount; ++i) { - RoundRobinSupplier supplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); - NioSelector acceptor = new NioSelector(eventHandlerFunction.apply(supplier)); - dedicatedAcceptors.add(acceptor); - } - - if (dedicatedAcceptorCount != 0) { - acceptorSupplier = new RoundRobinSupplier<>(dedicatedAcceptors.toArray(new NioSelector[0])); - } else { - acceptorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); - } - selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); - assert selectorCount == selectors.size() : "We need to have created all the selectors at this point."; - assert dedicatedAcceptorCount == dedicatedAcceptors.size() : "We need to have created all the acceptors at this point."; - - startSelectors(selectors, selectorThreadFactory); - startSelectors(dedicatedAcceptors, acceptorThreadFactory); - } catch (Exception e) { - try { - close(); - } catch (Exception e1) { - e.addSuppressed(e1); - } - throw e; - } - } - - public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) - throws IOException { - ensureOpen(); - return factory.openNioServerSocketChannel(address, acceptorSupplier); - } - - public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { - ensureOpen(); - return factory.openNioChannel(address, selectorSupplier); - } + S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException; @Override - public void close() throws IOException { - if (isOpen.compareAndSet(true, false)) { - List toClose = Stream.concat(dedicatedAcceptors.stream(), selectors.stream()).collect(Collectors.toList()); - List closingExceptions = new ArrayList<>(); - for (NioSelector selector : toClose) { - try { - selector.close(); - } catch (IOException e) { - closingExceptions.add(e); - } - } - ExceptionsHelper.rethrowAndSuppress(closingExceptions); - } - } - - private static void startSelectors(Iterable selectors, ThreadFactory threadFactory) { - for (NioSelector selector : selectors) { - if (selector.isRunning() == false) { - threadFactory.newThread(selector::runLoop).start(); - try { - selector.isRunningFuture().get(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IllegalStateException("Interrupted while waiting for selector to start.", e); - } catch (ExecutionException e) { - if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else { - throw new RuntimeException("Exception during selector start.", e); - } - } - } - } - } - - private void ensureOpen() { - if (isOpen.get() == 
false) { - throw new IllegalStateException("NioGroup is closed."); - } - } + void close() throws IOException; } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelectorGroup.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelectorGroup.java new file mode 100644 index 0000000000000..bb4d4d14a0c07 --- /dev/null +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelectorGroup.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import org.elasticsearch.nio.utils.ExceptionsHelper; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * The NioSelectorGroup is a group of selectors for interfacing with java nio. When it is started it will create the + * configured number of selectors. Each selector will be running in a dedicated thread. Server connections + * can be bound using the {@link #bindServerChannel(InetSocketAddress, ChannelFactory)} method. Client + * connections can be opened using the {@link #openChannel(InetSocketAddress, ChannelFactory)} method. + *

+ * The logic specific to a particular channel is provided by the {@link ChannelFactory} passed to the method + * when the channel is created. This is what allows an NioSelectorGroup to support different channel types. + */ +public class NioSelectorGroup implements NioGroup { + + + private final List dedicatedAcceptors; + private final RoundRobinSupplier acceptorSupplier; + + private final List selectors; + private final RoundRobinSupplier selectorSupplier; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + /** + * This will create an NioSelectorGroup with no dedicated acceptors. All server channels will be handled by the + * same selectors that are handling child channels. + * + * @param threadFactory factory to create selector threads + * @param selectorCount the number of selectors to be created + * @param eventHandlerFunction function for creating event handlers + * @throws IOException occurs if there is a problem while opening a java.nio.Selector + */ + public NioSelectorGroup(ThreadFactory threadFactory, int selectorCount, + Function, EventHandler> eventHandlerFunction) throws IOException { + this(null, 0, threadFactory, selectorCount, eventHandlerFunction); + } + + /** + * This will create an NioSelectorGroup with dedicated acceptors. All server channels will be handled by a group + * of selectors dedicated to accepting channels. These accepted channels will be handed off to the + * non-server selectors. + * + * @param acceptorThreadFactory factory to create acceptor selector threads + * @param dedicatedAcceptorCount the number of dedicated acceptor selectors to be created + * @param selectorThreadFactory factory to create non-acceptor selector threads + * @param selectorCount the number of non-acceptor selectors to be created + * @param eventHandlerFunction function for creating event handlers + * @throws IOException occurs if there is a problem while opening a java.nio.Selector + */ + public NioSelectorGroup(ThreadFactory acceptorThreadFactory, int dedicatedAcceptorCount, ThreadFactory selectorThreadFactory, + int selectorCount, Function, EventHandler> eventHandlerFunction) throws IOException { + dedicatedAcceptors = new ArrayList<>(dedicatedAcceptorCount); + selectors = new ArrayList<>(selectorCount); + + try { + List> suppliersToSet = new ArrayList<>(selectorCount); + for (int i = 0; i < selectorCount; ++i) { + RoundRobinSupplier supplier = new RoundRobinSupplier<>(); + suppliersToSet.add(supplier); + NioSelector selector = new NioSelector(eventHandlerFunction.apply(supplier)); + selectors.add(selector); + } + for (RoundRobinSupplier supplierToSet : suppliersToSet) { + supplierToSet.setSelectors(selectors.toArray(new NioSelector[0])); + assert supplierToSet.count() == selectors.size() : "Supplier should have same count as selector list."; + } + + for (int i = 0; i < dedicatedAcceptorCount; ++i) { + RoundRobinSupplier supplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + NioSelector acceptor = new NioSelector(eventHandlerFunction.apply(supplier)); + dedicatedAcceptors.add(acceptor); + } + + if (dedicatedAcceptorCount != 0) { + acceptorSupplier = new RoundRobinSupplier<>(dedicatedAcceptors.toArray(new NioSelector[0])); + } else { + acceptorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + } + selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + assert selectorCount == selectors.size() : "We need to have created all the selectors at this point."; + assert dedicatedAcceptorCount ==
dedicatedAcceptors.size() : "We need to have created all the acceptors at this point."; + + startSelectors(selectors, selectorThreadFactory); + startSelectors(dedicatedAcceptors, acceptorThreadFactory); + } catch (Exception e) { + try { + close(); + } catch (Exception e1) { + e.addSuppressed(e1); + } + throw e; + } + } + + @Override + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + ensureOpen(); + return factory.openNioServerSocketChannel(address, acceptorSupplier); + } + + @Override + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + ensureOpen(); + return factory.openNioChannel(address, selectorSupplier); + } + + @Override + public void close() throws IOException { + if (isOpen.compareAndSet(true, false)) { + List toClose = Stream.concat(dedicatedAcceptors.stream(), selectors.stream()).collect(Collectors.toList()); + List closingExceptions = new ArrayList<>(); + for (NioSelector selector : toClose) { + try { + selector.close(); + } catch (IOException e) { + closingExceptions.add(e); + } + } + ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + } + + private static void startSelectors(Iterable selectors, ThreadFactory threadFactory) { + for (NioSelector selector : selectors) { + if (selector.isRunning() == false) { + threadFactory.newThread(selector::runLoop).start(); + try { + selector.isRunningFuture().get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Interrupted while waiting for selector to start.", e); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new RuntimeException("Exception during selector start.", e); + } + } + } + } + } + + private void ensureOpen() { + if (isOpen.get() == false) { + throw new IllegalStateException("NioGroup is closed."); + } + } +} diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorGroupTests.java similarity index 88% rename from libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorGroupTests.java index 027f1255a590c..34a8d501ea316 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorGroupTests.java @@ -30,16 +30,16 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.mockito.Mockito.mock; -public class NioGroupTests extends ESTestCase { +public class NioSelectorGroupTests extends ESTestCase { - private NioGroup nioGroup; + private NioSelectorGroup nioGroup; @Override @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); - nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, daemonThreadFactory(Settings.EMPTY, "selector"), 1, - (s) -> new EventHandler(mock(Consumer.class), s)); + nioGroup = new NioSelectorGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, + daemonThreadFactory(Settings.EMPTY, "selector"), 1, (s) -> new EventHandler(mock(Consumer.class), s)); } @Override @@ -74,7 +74,7 @@ public void testCanCloseTwice() throws IOException { @SuppressWarnings("unchecked") public void testExceptionAtStartIsHandled() throws IOException { RuntimeException ex = new RuntimeException(); - CheckedRunnable ctor = () -> new NioGroup(r -> 
{throw ex;}, 1, + CheckedRunnable ctor = () -> new NioSelectorGroup(r -> {throw ex;}, 1, daemonThreadFactory(Settings.EMPTY, "selector"), 1, (s) -> new EventHandler(mock(Consumer.class), s)); RuntimeException runtimeException = expectThrows(RuntimeException.class, ctor::run); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java index 9eb32767eb7c8..5484f7f1f9c6b 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.geoip; +import org.apache.lucene.util.Constants; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; @@ -92,6 +93,7 @@ protected Settings nodeSettings(final int nodeOrdinal) { * @throws IOException if an I/O exception occurs building the JSON */ public void testLazyLoading() throws IOException { + assumeFalse("https://github.com/elastic/elasticsearch/issues/37342", Constants.WINDOWS); final BytesReference bytes; try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..fa128372b2467 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c84431b751d851f484f2799f6dcb9110113b1958 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 6f055c7ac5eda..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -825a0eb2b9ff58df36753971217cba80466d7132 \ No newline at end of file diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java index 28d32f50bfc69..bc24789341e04 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java @@ -36,12 +36,12 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.InboundMessage; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -111,13 +111,9 @@ public ExceptionThrowingNetty4Transport( } @Override - protected String handleRequest(TcpChannel channel, String profileName, - StreamInput stream, long requestId, int messageLengthBytes, Version version, - InetSocketAddress remoteAddress, byte 
status) throws IOException { - String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version, - remoteAddress, status); + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage request, int messageLengthBytes) throws IOException { + super.handleRequest(channel, request, messageLengthBytes); channelProfileName = TransportSettings.DEFAULT_PROFILE; - return action; } @Override diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..927a881df73f1 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +ea440f53a9e858c2ed87927c63d57eb70e5af9ee \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 4d7a031054064..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b0beeaa95c28e5e0679d684d4e2e30e90cf53e7 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..075bd24f3e609 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +b016fdfb8f1ac413d902cd4d244b295c9f01e610 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 0c588e1607a0d..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fff58bb761b71ded4bf1bfd41bad522df5c67f5c \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..d5046efc95935 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +395c8291751ffa7fbbb96faf578408a33a34ad1d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 78a9e33ed22e1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -167a2b06379c4a1233a866ea88df17bb481e7200 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..19a0a9a198ce0 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +6957c71604356a1fbf0e13794595e4ea42126dd7 \ No newline at end of file 
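For context on the nio refactor above: `NioGroup` is reduced to an interface, and `NioSelectorGroup` now carries the selector-based implementation that previously lived in `NioGroup`. The sketch below is illustrative only and not part of the patch; it assumes the `org.elasticsearch.nio` classes shown above, uses raw types for brevity, and its `factory` parameter is a hypothetical placeholder for a concrete `ChannelFactory`.

[source,java]
--------------------------------------------------
import org.elasticsearch.nio.ChannelFactory;
import org.elasticsearch.nio.EventHandler;
import org.elasticsearch.nio.NioGroup;
import org.elasticsearch.nio.NioSelectorGroup;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;

class NioGroupSketch {
    // One selector thread and no dedicated acceptors: the short constructor
    // delegates to the dedicated-acceptor constructor with a count of zero,
    // so server channels share the selectors that handle child channels.
    static void bindAndClose(ChannelFactory factory) throws IOException {
        NioGroup group = new NioSelectorGroup(Executors.defaultThreadFactory(), 1,
            (s) -> new EventHandler((e) -> {}, s));
        try {
            // Channel-specific logic comes from the ChannelFactory argument;
            // `factory` here is a stand-in supplied by the caller.
            group.bindServerChannel(new InetSocketAddress(0), factory);
        } finally {
            // Closes every selector once; subsequent bind/open calls on the
            // group throw IllegalStateException.
            group.close();
        }
    }
}
--------------------------------------------------

Note that `NioSelectorGroup#close` guards its shutdown with an `AtomicBoolean`, which is what keeps the `testCanCloseTwice` case in the renamed `NioSelectorGroupTests` passing.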
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 1ea10265dcfef..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77a1ed8e4544c31b7c949c9f0ddb7bc2a53adfb9 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..91597e3941158 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +09f42235a8daca4ca8ea604314c2eb9de51b9e98 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 0e69798085eb1..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49b825fc84de3f993eb161d1a38fdeefa9b5511a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..a868f7d5a053f --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7cf7eeb685a2060e97f50551236cfcaf39990083 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 85d83e7674670..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c60a0b708cd1c61ec34191df1bcf99b9211c08f \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..4f5f80aa96b91 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +ff163fb06ec3e47d501b290d8f69cca88a0341cc \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index eab26671dcaa3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -553be14e6c3bb82b7e70f76ce2d294e4fa26fc20 \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 9e42799b4da03..d68a9ee397645 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -32,7 +32,7 @@ dependencies { } if 
(System.getProperty('tests.distribution') == null) { - integTestCluster.distribution = 'oss-zip' + integTestCluster.distribution = 'oss' } unitTest.enabled = false diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index aa77a189cb78e..a5f274c7ccd34 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -26,13 +26,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.HttpChannel; @@ -41,7 +39,6 @@ import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; -import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSelector; @@ -50,6 +47,7 @@ import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import java.io.IOException; import java.net.InetSocketAddress; @@ -61,8 +59,6 @@ import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; -import static org.elasticsearch.common.settings.Setting.intSetting; -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; @@ -83,15 +79,9 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(NioHttpServerTransport.class); - public static final Setting NIO_HTTP_ACCEPTOR_COUNT = - intSetting("http.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); - public static final Setting NIO_HTTP_WORKER_COUNT = - new Setting<>("http.nio.worker_count", - (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "http.nio.worker_count"), Setting.Property.NodeScope); - protected final PageCacheRecycler pageCacheRecycler; protected final NioCorsConfig corsConfig; + private final NioGroupFactory nioGroupFactory; protected final boolean tcpNoDelay; protected final boolean tcpKeepAlive; @@ -99,14 +89,15 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { protected final int tcpSendBufferSize; protected final int tcpReceiveBufferSize; - private NioGroup nioGroup; + private volatile NioGroup nioGroup; private ChannelFactory channelFactory; public 
NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, - Dispatcher dispatcher) { + Dispatcher dispatcher, NioGroupFactory nioGroupFactory) { super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); this.pageCacheRecycler = pageCacheRecycler; + this.nioGroupFactory = nioGroupFactory; ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); @@ -134,11 +125,7 @@ public Logger getLogger() { protected void doStart() { boolean success = false; try { - int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings); - int workerCount = NIO_HTTP_WORKER_COUNT.get(settings); - nioGroup = new NioGroup(daemonThreadFactory(this.settings, HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), workerCount, - (s) -> new EventHandler(this::onNonChannelException, s)); + nioGroup = nioGroupFactory.getHttpGroup(); channelFactory = channelFactory(); bindServer(); success = true; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java new file mode 100644 index 0000000000000..0c17c1d8b85e5 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java @@ -0,0 +1,172 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.EventHandler; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelectorGroup; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.transport.TcpTransport; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetSocketAddress; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +/** + * Creates and returns {@link NioSelectorGroup} instances. 
It will return a shared group for + * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if + * {@link NioTransportPlugin#NIO_HTTP_WORKER_COUNT} is configured to be 0. If that setting is not 0, then it + * will return a different group in the {@link #getHttpGroup()} call. + */ +public final class NioGroupFactory { + + private final Logger logger; + private final Settings settings; + private final int httpWorkerCount; + + private RefCountedNioGroup refCountedGroup; + + public NioGroupFactory(Settings settings, Logger logger) { + this.logger = logger; + this.settings = settings; + this.httpWorkerCount = NioTransportPlugin.NIO_HTTP_WORKER_COUNT.get(settings); + } + + public Settings getSettings() { + return settings; + } + + public synchronized NioGroup getTransportGroup() throws IOException { + return getGenericGroup(); + } + + public synchronized NioGroup getHttpGroup() throws IOException { + if (httpWorkerCount == 0) { + return getGenericGroup(); + } else { + return new NioSelectorGroup(daemonThreadFactory(this.settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), + httpWorkerCount, (s) -> new EventHandler(this::onException, s)); + } + } + + private NioGroup getGenericGroup() throws IOException { + if (refCountedGroup == null) { + ThreadFactory threadFactory = daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX); + NioSelectorGroup nioGroup = new NioSelectorGroup(threadFactory, NioTransportPlugin.NIO_WORKER_COUNT.get(settings), + (s) -> new EventHandler(this::onException, s)); + this.refCountedGroup = new RefCountedNioGroup(nioGroup); + return new WrappedNioGroup(refCountedGroup); + } else { + refCountedGroup.incRef(); + return new WrappedNioGroup(refCountedGroup); + } + } + + private void onException(Exception exception) { + logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), + exception); + } + + private static class RefCountedNioGroup extends AbstractRefCounted implements NioGroup { + + public static final String NAME = "ref-counted-nio-group"; + private final NioSelectorGroup nioGroup; + + private RefCountedNioGroup(NioSelectorGroup nioGroup) { + super(NAME); + this.nioGroup = nioGroup; + } + + @Override + protected void closeInternal() { + try { + nioGroup.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + return nioGroup.bindServerChannel(address, factory); + } + + @Override + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + return nioGroup.openChannel(address, factory); + } + + @Override + public void close() throws IOException { + throw new UnsupportedOperationException("Should not close. Instead use decRef call."); + } + } + + /** + * Wraps the {@link RefCountedNioGroup}. Calls {@link RefCountedNioGroup#decRef()} on close. After close, + * this wrapped instance can no longer be used. 
+ */ + private class WrappedNioGroup implements NioGroup { + + private final RefCountedNioGroup refCountedNioGroup; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + private WrappedNioGroup(RefCountedNioGroup refCountedNioGroup) { + this.refCountedNioGroup = refCountedNioGroup; + } + + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + ensureOpen(); + return refCountedNioGroup.bindServerChannel(address, factory); + } + + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + ensureOpen(); + return refCountedNioGroup.openChannel(address, factory); + } + + @Override + public void close() { + if (isOpen.compareAndSet(true, false)) { + refCountedNioGroup.decRef(); + } + } + + private void ensureOpen() { + if (isOpen.get() == false) { + throw new IllegalStateException("NioGroup is closed."); + } + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index c5f5b414c07c9..a9a1c716ef82a 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -27,14 +27,11 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; -import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSelector; @@ -54,25 +51,21 @@ import java.util.function.Supplier; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; public class NioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(NioTransport.class); - public static final Setting NIO_WORKER_COUNT = - new Setting<>("transport.nio.worker_count", - (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); - private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private final NioGroupFactory groupFactory; private volatile NioGroup nioGroup; private volatile Function clientChannelFactory; protected NioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService) { + CircuitBreakerService circuitBreakerService, NioGroupFactory groupFactory) { super("nio", settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + this.groupFactory = groupFactory; } @Override @@ -91,8 +84,7 @@ protected NioTcpChannel initiateChannel(DiscoveryNode node) throws IOException { protected void doStart() { boolean success 
= false; try { - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), - NioTransport.NIO_WORKER_COUNT.get(settings), (s) -> new EventHandler(this::onNonChannelException, s)); + nioGroup = groupFactory.getTransportGroup(); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = clientChannelFactoryFunction(clientProfileSettings); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index 9c73cbc5a662a..647b53ad47a59 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -19,6 +19,9 @@ package org.elasticsearch.transport.nio; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,6 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.nio.NioHttpServerTransport; @@ -41,17 +45,29 @@ import java.util.Map; import java.util.function.Supplier; +import static org.elasticsearch.common.settings.Setting.intSetting; + public class NioTransportPlugin extends Plugin implements NetworkPlugin { public static final String NIO_TRANSPORT_NAME = "nio-transport"; public static final String NIO_HTTP_TRANSPORT_NAME = "nio-http-transport"; + private static final Logger logger = LogManager.getLogger(NioTransportPlugin.class); + + public static final Setting NIO_WORKER_COUNT = + new Setting<>("transport.nio.worker_count", + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); + public static final Setting NIO_HTTP_WORKER_COUNT = + intSetting("http.nio.worker_count", 0, 0, Setting.Property.NodeScope); + + private final SetOnce groupFactory = new SetOnce<>(); + @Override public List> getSettings() { return Arrays.asList( - NioHttpServerTransport.NIO_HTTP_ACCEPTOR_COUNT, - NioHttpServerTransport.NIO_HTTP_WORKER_COUNT, - NioTransport.NIO_WORKER_COUNT + NIO_HTTP_WORKER_COUNT, + NIO_WORKER_COUNT ); } @@ -61,7 +77,7 @@ public Map> getTransports(Settings settings, ThreadP NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { return Collections.singletonMap(NIO_TRANSPORT_NAME, () -> new NioTransport(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, - circuitBreakerService)); + circuitBreakerService, getNioGroupFactory(settings))); } @Override @@ -73,6 +89,17 @@ public Map> getHttpTransports(Settings set HttpServerTransport.Dispatcher dispatcher) { return Collections.singletonMap(NIO_HTTP_TRANSPORT_NAME, () -> new NioHttpServerTransport(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, - dispatcher)); + dispatcher, getNioGroupFactory(settings))); + } + + private synchronized 
NioGroupFactory getNioGroupFactory(Settings settings) { + NioGroupFactory nioGroupFactory = groupFactory.get(); + if (nioGroupFactory != null) { + assert nioGroupFactory.getSettings().equals(settings) : "Different settings than originally provided"; + return nioGroupFactory; + } else { + groupFactory.set(new NioGroupFactory(settings, logger)); + return groupFactory.get(); + } } } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java index 703f7acbf8257..9ed64213cbf5f 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -20,10 +20,8 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.nio.NioHttpServerTransport; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.transport.nio.NioTransport; import org.elasticsearch.transport.nio.NioTransportPlugin; import java.util.Collection; @@ -46,8 +44,8 @@ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); // randomize nio settings if (randomBoolean()) { - builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1); - builder.put(NioHttpServerTransport.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(NioTransportPlugin.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(NioTransportPlugin.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1); } builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); builder.put(NetworkModule.HTTP_TYPE_KEY, NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java index b49d5b5186628..e3259b10b9745 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java @@ -45,7 +45,7 @@ import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelectorGroup; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; @@ -88,11 +88,11 @@ static Collection returnOpaqueIds(Collection responses private static final Logger logger = LogManager.getLogger(NioHttpClient.class); - private final NioGroup nioGroup; + private final NioSelectorGroup nioGroup; NioHttpClient() { try { - nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1, + nioGroup = new NioSelectorGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1, (s) -> new EventHandler(this::onException, s)); } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index 13b8e60336e02..2ffd5a64147cc 100644 --- 
a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.junit.After; import org.junit.Before; @@ -202,7 +203,7 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC } }; try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, - xContentRegistry(), dispatcher)) { + xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (NioHttpClient client = new NioHttpClient()) { @@ -235,12 +236,12 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC public void testBindUnavailableAddress() { try (NioHttpServerTransport transport = new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), new NullDispatcher())) { + threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger))) { transport.start(); TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), new NullDispatcher())) { + threadPool, xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger))) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); } @@ -284,7 +285,7 @@ public void dispatchBadRequest(final RestRequest request, } try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, - threadPool, xContentRegistry(), dispatcher)) { + threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioGroupFactoryTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioGroupFactoryTests.java new file mode 100644 index 0000000000000..356eb39b73449 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioGroupFactoryTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelector; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.function.Consumer; + +public class NioGroupFactoryTests extends ESTestCase { + + public void testSharedGroupStillWorksWhenOneInstanceClosed() throws IOException { + NioGroupFactory groupFactory = new NioGroupFactory(Settings.EMPTY, logger); + + InetSocketAddress inetSocketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + NioGroup httpGroup = groupFactory.getHttpGroup(); + try { + NioGroup transportGroup = groupFactory.getTransportGroup(); + transportGroup.close(); + expectThrows(IllegalStateException.class, () -> transportGroup.bindServerChannel(inetSocketAddress, new BindingFactory())); + + httpGroup.bindServerChannel(inetSocketAddress, new BindingFactory()); + } finally { + httpGroup.close(); + } + expectThrows(IllegalStateException.class, () -> httpGroup.bindServerChannel(inetSocketAddress, new BindingFactory())); + } + + private static class BindingFactory extends ChannelFactory<NioServerSocketChannel, NioSocketChannel> { + + private BindingFactory() { + super(new ChannelFactory.RawChannelFactory(false, false, false, -1, -1)); + } + + @Override + public NioSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { + throw new IOException("boom"); + } + + @Override + public NioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { + NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel); + Consumer<Exception> exceptionHandler = (e) -> {}; + Consumer<NioSocketChannel> acceptor = (c) -> {}; + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler); + nioChannel.setContext(context); + return nioChannel; + } + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java index 61f07aa016207..087c3758bb98b 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport.nio; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.NioIntegTestCase; import org.elasticsearch.Version; @@ -36,12 +38,12 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import
org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.InboundMessage; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -54,6 +56,7 @@ @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class NioTransportIT extends NioIntegTestCase { + // static so we can use it in anonymous classes private static String channelProfileName = null; @@ -86,6 +89,8 @@ public void testThatConnectionFailsAsIntended() throws Exception { public static final class ExceptionThrowingNioTransport extends NioTransport { + private static final Logger logger = LogManager.getLogger(ExceptionThrowingNioTransport.class); + public static class TestPlugin extends Plugin implements NetworkPlugin { @Override @@ -103,17 +108,14 @@ public Map> getTransports(Settings settings, ThreadP ExceptionThrowingNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + super(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, + new NioGroupFactory(settings, logger)); } @Override - protected String handleRequest(TcpChannel channel, String profileName, - StreamInput stream, long requestId, int messageLengthBytes, Version version, - InetSocketAddress remoteAddress, byte status) throws IOException { - String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version, - remoteAddress, status); + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage request, int messageLengthBytes) throws IOException { + super.handleRequest(channel, request, messageLengthBytes); channelProfileName = TransportSettings.DEFAULT_PROFILE; - return action; } @Override diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 82a991036270d..a78e924298a9e 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -52,12 +52,12 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase { - public static MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + public MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); Transport transport = new NioTransport(settings, version, threadPool, networkService, new MockPageCacheRecycler(settings), - namedWriteableRegistry, new NoneCircuitBreakerService()) { + namedWriteableRegistry, new NoneCircuitBreakerService(), new NioGroupFactory(settings, 
logger)) { @Override public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, diff --git a/qa/build.gradle b/qa/build.gradle index 0336b947d06aa..cbcb1d4580704 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -4,8 +4,8 @@ import org.elasticsearch.gradle.test.RestIntegTestTask subprojects { Project subproj -> subproj.tasks.withType(RestIntegTestTask) { subproj.extensions.configure("${it.name}Cluster") { cluster -> - cluster.distribution = System.getProperty('tests.distribution', 'oss-zip') - if (cluster.distribution == 'zip') { + cluster.distribution = System.getProperty('tests.distribution', 'oss') + if (cluster.distribution == 'default') { /* * Add Elastic's repositories so we can resolve older versions of the * default distribution. Those aren't in maven central. diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java index 1704deada1762..ba1cd80fc88a3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -30,13 +30,13 @@ /** * Basic tests for simple xpack functionality that are only run if the - * cluster is the on the "zip" distribution. + * cluster is on the default distribution. */ public class XPackIT extends AbstractRollingTestCase { @Before public void skipIfNotXPack() { assumeThat("test is only supported if the distribution contains xpack", - System.getProperty("tests.distribution"), equalTo("zip")); + System.getProperty("tests.distribution"), equalTo("default")); assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", CLUSTER_TYPE, equalTo(ClusterType.UPGRADED)); /* diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 9d299e16f0210..cd64262fcdce1 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -29,7 +29,7 @@ integTestCluster { } integTestRunner { - if ('zip'.equals(integTestCluster.distribution)) { + if ('default'.equals(integTestCluster.distribution)) { systemProperty 'tests.rest.blacklist', [ 'cat.templates/10_basic/No templates', 'cat.templates/10_basic/Sort templates', diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index d4b04ce25110b..a9b6639359e24 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -316,6 +316,8 @@ Supports also regular expressions with flag X for more readability (accepts whit \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/ .... +**Note:** `$body` refers to the last obtained response body as a raw string, while `''` refers to the parsed representation (parsed into a Map by the Java runner, for instance). Having the raw string response is useful, for example, when testing the cat APIs; see the sketch below.
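For illustration, here is a minimal sketch of the kind of REST test the added note describes (the `cat.count` call and the index name are assumptions for the example, not part of this change). Matching against `$body` applies the regex to the raw plain-text response rather than a parsed Map, which is why it suits the cat APIs:

....
  - do:
      cat.count:
        index: test_index   # hypothetical index name

  # $body is the raw response string, so a whitespace-flagged regex can
  # assert on the cat API's plain-text columns (epoch, timestamp, count)
  - match:
      $body: |
               /^ \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/
....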
+ === `lt` and `gt` Compares two numeric values, eg: diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..4dc7f4b4ca6bf --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +f4d286ed7940fc206331424e8ce577584208eba3 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index be6bfec6e5563..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66e4d1a3f91be88d903b1c75e71c8b15a7dc4f90 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..df0783e11fee6 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e5416da7370f5def9a79fb1cccb091b091b808a5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 357ce92760e39..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069cfb0c693d365cbf332973ab796ba33646f867 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7a4b68f8d857d --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index efbb9ada534a5..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad32720fe677becb93a26692338b63754613aa50 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..e1933653b9f4d --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +045dcae6368a436cec6e56f654a9c6b3a4656d17 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 7f35ad20b8230..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f9499ffc5e956f7a113308198e74e80b7df290b \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..85aff2f06dd49 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +63b7164d24547cea36df3d81eedb9e8b424cf3c2 \ No newline at end of file diff --git 
a/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 566114f72948f..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -162cac09191f267039cdb73ccc7face5ef54ba8b \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..41b3910da1319 --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7b3358889c491237709e8aea4a39e816227f3d26 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index baa09c01bed13..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -106cbbc96feb7413b17539f8a0ae2e7692550f44 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..22318a34bf09e --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +5d9566e2b3be41c81fe42df1c57fff31fcdbc565 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index ba3d8ec94d0ac..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37336dec582ce1569e944d1d8a5181c2eb2aec25 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..2c4429c8c7619 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +45574e22c4b1569a2aecd5b2837388c7287892b5 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 764fffadffe99..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c18617a95c109160d0dacb58a1e268014f7f5862 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..ae11e5858a28c --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +67405660330db8c09e0994a615cf3ab95d7bc151 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index e50a454a0ebd9..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab86036efd74fc41730e20dd2d3de977297878e0 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..fcb52584b667c --- /dev/null +++ 
b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c8de64bf2c4f09766d4cd62e8dd403016665f37c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 8b2d290d647ed..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02f3f472494f250da6fe7199de6c2f2ef5972774 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..80750b2d41e33 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +fc080b45b881a78a23743df9a7388fd3dbbb9d66 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 2466df430847f..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da0a248a2bb69499715411b682d45adaea5ab499 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..32f5ca7196e17 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +6e0e12b9882005b87ef39325e1fc8539c8caff31 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index c8b8762f25a92..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8ed0aad4c4214d0fe3571dfa2d09c936a91cf3c7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..2cf474908d1ba --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7ed9c9a03e1a15840237d09ca4da7aadfce1c780 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 17b2f3ef4a33c..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9105a1c73feeb836ca4244367f02d6a8d7e3cc27 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..c4016fb692e2f --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +fdf0efdeb9a73a6b0f8349df21049ecbc73955d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 62d3f24344e33..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6ebc95227d4415cc6d345c1dd3759e1f348e0ca4 \ No newline at end of 
file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7ea28b0ed87da --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c69399183d5f9f85f3f8130452d0bed62fd92440 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 8471aff350b83..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4cf1b911521f962c9bd2d28efb519bf9e1b88f4 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 530c5ce4f6396..1fc5ef474ee04 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1009,10 +1009,8 @@ private enum ElasticsearchExceptionHandle { MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0), COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), - CLUSTER_ALREADY_BOOTSTRAPPED_EXCEPTION(org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException.class, - org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException::new, 151, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, - org.elasticsearch.snapshots.SnapshotInProgressException::new, 152, Version.V_7_0_0); + org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_7_0_0); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index a3d3c615162dc..142aa6bde74b6 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -23,10 +23,6 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.TransportBootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.TransportGetDiscoveredNodesAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; @@ -433,8 +429,6 @@ public void reg actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class); actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class); - actions.register(GetDiscoveredNodesAction.INSTANCE, TransportGetDiscoveredNodesAction.class); - 
actions.register(BootstrapClusterAction.INSTANCE, TransportBootstrapClusterAction.class); actions.register(AddVotingConfigExclusionsAction.INSTANCE, TransportAddVotingConfigExclusionsAction.class); actions.register(ClearVotingConfigExclusionsAction.INSTANCE, TransportClearVotingConfigExclusionsAction.class); actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java deleted file mode 100644 index 28a8e580cedc4..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.Action; -import org.elasticsearch.common.io.stream.Writeable.Reader; - -public class BootstrapClusterAction extends Action { - public static final BootstrapClusterAction INSTANCE = new BootstrapClusterAction(); - public static final String NAME = "cluster:admin/bootstrap/set_voting_config"; - - private BootstrapClusterAction() { - super(NAME); - } - - @Override - public BootstrapClusterResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public Reader getResponseReader() { - return BootstrapClusterResponse::new; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java deleted file mode 100644 index f8d0bcb13a58f..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Request to set the initial configuration of master-eligible nodes in a cluster so that the very first master election can take place. - */ -public class BootstrapClusterRequest extends ActionRequest { - private final BootstrapConfiguration bootstrapConfiguration; - - public BootstrapClusterRequest(BootstrapConfiguration bootstrapConfiguration) { - this.bootstrapConfiguration = bootstrapConfiguration; - } - - public BootstrapClusterRequest(StreamInput in) throws IOException { - super(in); - bootstrapConfiguration = new BootstrapConfiguration(in); - } - - /** - * @return the bootstrap configuration: the initial set of master-eligible nodes whose votes are counted in elections. - */ - public BootstrapConfiguration getBootstrapConfiguration() { - return bootstrapConfiguration; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - bootstrapConfiguration.writeTo(out); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java deleted file mode 100644 index 2576409a3cef1..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Response to a {@link BootstrapClusterRequest} indicating that the cluster has been successfully bootstrapped. 
- */ -public class BootstrapClusterResponse extends ActionResponse { - private final boolean alreadyBootstrapped; - - public BootstrapClusterResponse(boolean alreadyBootstrapped) { - this.alreadyBootstrapped = alreadyBootstrapped; - } - - public BootstrapClusterResponse(StreamInput in) throws IOException { - super(in); - alreadyBootstrapped = in.readBoolean(); - } - - /** - * @return whether this node already knew that the cluster had been bootstrapped when handling this request. - */ - public boolean getAlreadyBootstrapped() { - return alreadyBootstrapped; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(alreadyBootstrapped); - } - - @Override - public String toString() { - return "BootstrapClusterResponse{" + - "alreadyBootstrapped=" + alreadyBootstrapped + - '}'; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java deleted file mode 100644 index 822287af77465..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -public class BootstrapConfiguration implements Writeable { - - private final List nodeDescriptions; - - public BootstrapConfiguration(List nodeDescriptions) { - if (nodeDescriptions.isEmpty()) { - throw new IllegalArgumentException("cannot create empty bootstrap configuration"); - } - this.nodeDescriptions = Collections.unmodifiableList(new ArrayList<>(nodeDescriptions)); - } - - public BootstrapConfiguration(StreamInput in) throws IOException { - nodeDescriptions = Collections.unmodifiableList(in.readList(NodeDescription::new)); - assert nodeDescriptions.isEmpty() == false; - } - - public List getNodeDescriptions() { - return nodeDescriptions; - } - - public VotingConfiguration resolve(Iterable discoveredNodes) { - final Set selectedNodes = new HashSet<>(); - for (final NodeDescription nodeDescription : nodeDescriptions) { - final DiscoveryNode discoveredNode = nodeDescription.resolve(discoveredNodes); - if (selectedNodes.add(discoveredNode) == false) { - throw new ElasticsearchException("multiple nodes matching {} in {}", discoveredNode, this); - } - } - - final Set nodeIds = selectedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); - assert nodeIds.size() == selectedNodes.size() : selectedNodes + " does not contain distinct IDs"; - return new VotingConfiguration(nodeIds); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeList(nodeDescriptions); - } - - @Override - public String toString() { - return "BootstrapConfiguration{" + - "nodeDescriptions=" + nodeDescriptions + - '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - BootstrapConfiguration that = (BootstrapConfiguration) o; - return Objects.equals(nodeDescriptions, that.nodeDescriptions); - } - - @Override - public int hashCode() { - return Objects.hash(nodeDescriptions); - } - - public static class NodeDescription implements Writeable { - - @Nullable - private final String id; - - private final String name; - - @Nullable - public String getId() { - return id; - } - - public String getName() { - return name; - } - - public NodeDescription(@Nullable String id, String name) { - this.id = id; - this.name = Objects.requireNonNull(name); - } - - public NodeDescription(DiscoveryNode discoveryNode) { - this(discoveryNode.getId(), discoveryNode.getName()); - } - - public NodeDescription(StreamInput in) throws IOException { - this(in.readOptionalString(), in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(id); - out.writeString(name); - } - - @Override - public String toString() { - return "NodeDescription{" + - "id='" + id + '\'' + - ", name='" + name + '\'' + - '}'; - } - - public DiscoveryNode resolve(Iterable 
discoveredNodes) { - DiscoveryNode selectedNode = null; - for (final DiscoveryNode discoveredNode : discoveredNodes) { - assert discoveredNode.isMasterNode() : discoveredNode; - if (discoveredNode.getName().equals(name)) { - if (id == null || id.equals(discoveredNode.getId())) { - if (selectedNode != null) { - throw new ElasticsearchException( - "discovered multiple nodes matching {} in {}", this, discoveredNodes); - } - selectedNode = discoveredNode; - } else { - throw new ElasticsearchException("node id mismatch comparing {} to {}", this, discoveredNode); - } - } else if (id != null && id.equals(discoveredNode.getId())) { - throw new ElasticsearchException("node name mismatch comparing {} to {}", this, discoveredNode); - } - } - if (selectedNode == null) { - throw new ElasticsearchException("no node matching {} found in {}", this, discoveredNodes); - } - - return selectedNode; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - NodeDescription that = (NodeDescription) o; - return Objects.equals(id, that.id) && - Objects.equals(name, that.name); - } - - @Override - public int hashCode() { - return Objects.hash(id, name); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java deleted file mode 100644 index acaef284a5420..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.Action; -import org.elasticsearch.common.io.stream.Writeable.Reader; - -public class GetDiscoveredNodesAction extends Action { - public static final GetDiscoveredNodesAction INSTANCE = new GetDiscoveredNodesAction(); - public static final String NAME = "cluster:admin/bootstrap/discover_nodes"; - - private GetDiscoveredNodesAction() { - super(NAME); - } - - @Override - public GetDiscoveredNodesResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public Reader getResponseReader() { - return GetDiscoveredNodesResponse::new; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java deleted file mode 100644 index f91a4de5263be..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -/** - * Request the set of master-eligible nodes discovered by this node. Most useful in a brand-new cluster as a precursor to setting the - * initial configuration using {@link BootstrapClusterRequest}. - */ -public class GetDiscoveredNodesRequest extends ActionRequest { - - @Nullable // if the request should wait indefinitely - private TimeValue timeout = TimeValue.timeValueSeconds(30); - - private List requiredNodes = Collections.emptyList(); - - public GetDiscoveredNodesRequest() { - } - - public GetDiscoveredNodesRequest(StreamInput in) throws IOException { - super(in); - timeout = in.readOptionalTimeValue(); - requiredNodes = in.readList(StreamInput::readString); - } - - /** - * Sometimes it is useful to wait until enough nodes have been discovered, rather than failing immediately. This parameter controls how - * long to wait, and defaults to 30s. - * - * @param timeout how long to wait to discover sufficiently many nodes to respond successfully. 
- */ - public void setTimeout(@Nullable TimeValue timeout) { - if (timeout != null && timeout.compareTo(TimeValue.ZERO) < 0) { - throw new IllegalArgumentException("negative timeout of [" + timeout + "] is not allowed"); - } - this.timeout = timeout; - } - - /** - * Sometimes it is useful to wait until enough nodes have been discovered, rather than failing immediately. This parameter controls how - * long to wait, and defaults to 30s. - * - * @return how long to wait to discover sufficiently many nodes to respond successfully. - */ - @Nullable - public TimeValue getTimeout() { - return timeout; - } - - /** - * Sometimes it is useful only to receive a successful response after discovering a certain set of master-eligible nodes. - * This parameter gives the names or transport addresses of the expected nodes. - * - * @return list of expected nodes - */ - public List getRequiredNodes() { - return requiredNodes; - } - - /** - * Sometimes it is useful only to receive a successful response after discovering a certain set of master-eligible nodes. - * This parameter gives the names or transport addresses of the expected nodes. - * - * @param requiredNodes list of expected nodes - */ - public void setRequiredNodes(final List requiredNodes) { - this.requiredNodes = requiredNodes; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalTimeValue(timeout); - out.writeStringList(requiredNodes); - } - - @Override - public String toString() { - return "GetDiscoveredNodesRequest{" + - "timeout=" + timeout + - ", requiredNodes=" + requiredNodes + "}"; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java deleted file mode 100644 index f697e16c03c2c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Response to {@link GetDiscoveredNodesRequest}, containing the set of master-eligible nodes that were discovered. - */ -public class GetDiscoveredNodesResponse extends ActionResponse { - private final Set nodes; - - public GetDiscoveredNodesResponse(Set nodes) { - this.nodes = Collections.unmodifiableSet(new HashSet<>(nodes)); - } - - public GetDiscoveredNodesResponse(StreamInput in) throws IOException { - super(in); - nodes = Collections.unmodifiableSet(in.readSet(DiscoveryNode::new)); - } - - /** - * @return the set of nodes that were discovered. - */ - public Set getNodes() { - return nodes; - } - - /** - * @return a bootstrap configuration constructed from the set of nodes that were discovered, in order to make a - * {@link BootstrapClusterRequest}. - */ - public BootstrapConfiguration getBootstrapConfiguration() { - return new BootstrapConfiguration(nodes.stream().map(NodeDescription::new).collect(Collectors.toList())); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeCollection(nodes); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java deleted file mode 100644 index 32a9f39cc0db8..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; - -public class TransportBootstrapClusterAction extends HandledTransportAction { - - @Nullable // TODO make this not nullable - private final Coordinator coordinator; - private final TransportService transportService; - private final String discoveryType; - - @Inject - public TransportBootstrapClusterAction(Settings settings, ActionFilters actionFilters, TransportService transportService, - Discovery discovery) { - super(BootstrapClusterAction.NAME, transportService, actionFilters, BootstrapClusterRequest::new); - this.transportService = transportService; - this.discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - if (discovery instanceof Coordinator) { - coordinator = (Coordinator) discovery; - } else { - coordinator = null; - } - } - - @Override - protected void doExecute(Task task, BootstrapClusterRequest request, ActionListener listener) { - if (coordinator == null) { // TODO remove when not nullable - throw new IllegalArgumentException("cluster bootstrapping is not supported by discovery type [" + discoveryType + "]"); - } - - final DiscoveryNode localNode = transportService.getLocalNode(); - assert localNode != null; - if (localNode.isMasterNode() == false) { - throw new IllegalArgumentException( - "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"); - } - - transportService.getThreadPool().generic().execute(new AbstractRunnable() { - @Override - public void doRun() { - listener.onResponse(new BootstrapClusterResponse( - coordinator.setInitialConfiguration(request.getBootstrapConfiguration()) == false)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - - @Override - public String toString() { - return "setting initial configuration with " + request; - } - }); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java deleted file mode 100644 index 6f6336c3bd5f3..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportService; - -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; - -public class TransportGetDiscoveredNodesAction extends HandledTransportAction { - - @Nullable // TODO make this not nullable - private final Coordinator coordinator; - private final TransportService transportService; - private final String discoveryType; - - @Inject - public TransportGetDiscoveredNodesAction(Settings settings, ActionFilters actionFilters, TransportService transportService, - Discovery discovery) { - super(GetDiscoveredNodesAction.NAME, transportService, actionFilters, - (Reader) GetDiscoveredNodesRequest::new); - - this.discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - this.transportService = transportService; - if (discovery instanceof Coordinator) { - coordinator = (Coordinator) discovery; - } else { - coordinator = null; - } - } - - @Override - protected void doExecute(Task task, GetDiscoveredNodesRequest request, ActionListener listener) { - if (coordinator == null) { // TODO remove when not nullable - throw new IllegalArgumentException("discovered nodes are not exposed by discovery type [" + discoveryType + "]"); - } - - final DiscoveryNode localNode = transportService.getLocalNode(); - assert localNode != null; - if (localNode.isMasterNode() == false) { - throw new IllegalArgumentException( - "this node is not master-eligible, but discovered nodes are only exposed by master-eligible nodes"); - } - final ExecutorService directExecutor = EsExecutors.newDirectExecutorService(); - final AtomicBoolean listenerNotified = new AtomicBoolean(); - final ListenableFuture listenableFuture = new ListenableFuture<>(); - final ThreadPool threadPool = transportService.getThreadPool(); - listenableFuture.addListener(listener, 
directExecutor, threadPool.getThreadContext()); - // TODO make it so that listenableFuture copes with multiple completions, and then remove listenerNotified - - final ActionListener> respondIfRequestSatisfied = new ActionListener>() { - @Override - public void onResponse(Iterable nodes) { - final Set nodesSet = new LinkedHashSet<>(); - nodesSet.add(localNode); - nodes.forEach(nodesSet::add); - logger.trace("discovered {}", nodesSet); - try { - if (checkWaitRequirements(request, nodesSet)) { - final GetDiscoveredNodesResponse response = new GetDiscoveredNodesResponse(nodesSet); - if (listenerNotified.compareAndSet(false, true)) { - listenableFuture.onResponse(response); - } - } - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - if (listenerNotified.compareAndSet(false, true)) { - listenableFuture.onFailure(e); - } - } - - @Override - public String toString() { - return "waiting for " + request; - } - }; - - final Releasable releasable = coordinator.withDiscoveryListener(respondIfRequestSatisfied); - listenableFuture.addListener(ActionListener.wrap(releasable::close), directExecutor, threadPool.getThreadContext()); - - if (coordinator.isInitialConfigurationSet()) { - respondIfRequestSatisfied.onFailure(new ClusterAlreadyBootstrappedException()); - } else { - respondIfRequestSatisfied.onResponse(coordinator.getFoundPeers()); - } - - if (request.getTimeout() != null) { - threadPool.schedule(request.getTimeout(), Names.SAME, new Runnable() { - @Override - public void run() { - respondIfRequestSatisfied.onFailure(new ElasticsearchTimeoutException("timed out while waiting for " + request)); - } - - @Override - public String toString() { - return "timeout handler for " + request; - } - }); - } - } - - private static boolean matchesRequirement(DiscoveryNode discoveryNode, String requirement) { - return discoveryNode.getName().equals(requirement) - || discoveryNode.getAddress().toString().equals(requirement) - || discoveryNode.getAddress().getAddress().equals(requirement); - } - - private static boolean checkWaitRequirements(GetDiscoveredNodesRequest request, Set nodes) { - List requirements = request.getRequiredNodes(); - final Set selectedNodes = new HashSet<>(); - for (final String requirement : requirements) { - final Set matchingNodes - = nodes.stream().filter(n -> matchesRequirement(n, requirement)).collect(Collectors.toSet()); - - if (matchingNodes.isEmpty()) { - return false; - } - if (matchingNodes.size() > 1) { - throw new IllegalArgumentException("[" + requirement + "] matches " + matchingNodes); - } - - for (final DiscoveryNode matchingNode : matchingNodes) { - if (selectedNodes.add(matchingNode) == false) { - throw new IllegalArgumentException("[" + matchingNode + "] matches " + - requirements.stream().filter(r -> matchesRequirement(matchingNode, requirement)).collect(Collectors.toList())); - } - } - } - - return true; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index f031dfa581064..d6abbf73e8864 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ShardOperationFailedException; +import 
org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.Nullable; @@ -43,7 +44,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -70,7 +70,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final Object shardFailuresMutex = new Object(); private final AtomicInteger successfulOps = new AtomicInteger(); private final AtomicInteger skippedOps = new AtomicInteger(); - private final TransportSearchAction.SearchTimeProvider timeProvider; + private final SearchTimeProvider timeProvider; private final SearchResponse.Clusters clusters; AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService, @@ -79,7 +79,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten Map<String, Set<String>> indexRoutings, Executor executor, SearchRequest request, ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts, - TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, + SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task, SearchPhaseResults<Result> resultConsumer, int maxConcurrentRequestsPerNode, SearchResponse.Clusters clusters) { super(name, request, shardsIts, logger, maxConcurrentRequestsPerNode, executor); @@ -103,8 +103,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten * Builds how long it took to execute the search. */ long buildTookInMillis() { - return TimeUnit.NANOSECONDS.toMillis( - timeProvider.getRelativeCurrentNanos() - timeProvider.getRelativeStartNanos()); + return timeProvider.buildTookInMillis(); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 418d95b2077a9..027d9d5f10c25 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -170,7 +170,7 @@ static SortedTopDocs sortDocs(boolean ignoreFrom, Collection 0) { setShardIndex(td.topDocs, queryResult.getShardIndex()); @@ -439,12 +439,10 @@ private ReducedQueryPhase reducedQueryPhase(Collection= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; numReducePhases++; // increment for this phase - boolean timedOut = false; - Boolean terminatedEarly = null; if (queryResults.isEmpty()) { // early terminate we have nothing to reduce final TotalHits totalHits = topDocsStats.getTotalHits(); - return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.maxScore, - timedOut, terminatedEarly, null, null, null, SortedTopDocs.EMPTY, null, numReducePhases, 0, 0, true); + return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.getMaxScore(), + false, null, null, null, null, SortedTopDocs.EMPTY, null, numReducePhases, 0, 0, true); } final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult(); final boolean hasSuggest = firstResult.suggest() != null; @@ -476,16 +474,6 @@ private ReducedQueryPhase reducedQueryPhase(Collection> suggestion : result.suggest()) { @@ -508,8 +496,8 @@ private ReducedQueryPhase reducedQueryPhase(CollectiontrackTotalHitsUpTo * so we return this lower bound when the
total hits is greater than this value. * This can happen when multiple shards are merged since the limit to track total hits @@ -777,7 +763,7 @@ TotalHits getTotalHits() { } } - void add(TopDocsAndMaxScore topDocs) { + void add(TopDocsAndMaxScore topDocs, boolean timedOut, Boolean terminatedEarly) { if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { totalHits += topDocs.topDocs.totalHits.value; if (topDocs.topDocs.totalHits.relation == Relation.GREATER_THAN_OR_EQUAL_TO) { @@ -788,6 +774,16 @@ void add(TopDocsAndMaxScore topDocs) { if (!Float.isNaN(topDocs.maxScore)) { maxScore = Math.max(maxScore, topDocs.maxScore); } + if (timedOut) { + this.timedOut = true; + } + if (terminatedEarly != null) { + if (this.terminatedEarly == null) { + this.terminatedEarly = terminatedEarly; + } else if (terminatedEarly) { + this.terminatedEarly = true; + } + } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java new file mode 100644 index 0000000000000..b146d42c0d2e6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -0,0 +1,297 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.search; + +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.suggest.Suggest; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Function; + +import static org.elasticsearch.action.search.SearchPhaseController.TopDocsStats; +import static org.elasticsearch.action.search.SearchPhaseController.mergeTopDocs; +import static org.elasticsearch.action.search.SearchResponse.Clusters; + +/** + * Merges multiple search responses into one. Used in cross-cluster search when reduction is performed locally on each cluster. + * The CCS coordinating node sends one search request per remote cluster involved and gets one search response back from each of them. + * Each such response contains all the info needed to perform an additional reduction and return results to the user. + * The preconditions are that each cluster has performed only a non-final reduction, meaning that buckets have not been pruned locally + * and pipeline aggregations have not yet been executed, and that from+size search hits have been requested from each cluster and such + * results have all already been fetched downstream. + * This approach makes a different trade-off than ordinary cross-cluster search, where we fan out to all the shards no matter + * whether they belong to the local or to a remote cluster. Given that there is commonly network latency when communicating with remote + * clusters, limiting the number of requests to one per cluster is beneficial, and outweighs the downside of fetching many more hits than + * needed downstream and returning bigger responses to the coordinating node. + * Known limitations: + * - scroll requests are not supported + * - field collapsing is supported, but whenever inner_hits are requested, they will be retrieved by each cluster locally after the fetch + * phase, through the {@link ExpandSearchPhase}. Such inner_hits are not merged together as part of hits reduction. + */ +//TODO it may make sense to integrate the remote clusters responses as a shard response in the initial search phase and ignore hits coming
+//from the remote clusters in the fetch phase. This would be identical to the removed QueryAndFetch strategy except that only the remote +//cluster response would have the fetch results. +final class SearchResponseMerger { + private final int from; + private final int size; + private final int trackTotalHitsUpTo; + private final SearchTimeProvider searchTimeProvider; + private final Function<Boolean, ReduceContext> reduceContextFunction; + private final List<SearchResponse> searchResponses = new CopyOnWriteArrayList<>(); + + SearchResponseMerger(int from, int size, int trackTotalHitsUpTo, SearchTimeProvider searchTimeProvider, + Function<Boolean, ReduceContext> reduceContextFunction) { + this.from = from; + this.size = size; + this.trackTotalHitsUpTo = trackTotalHitsUpTo; + this.searchTimeProvider = Objects.requireNonNull(searchTimeProvider); + this.reduceContextFunction = Objects.requireNonNull(reduceContextFunction); + } + + /** + * Add a search response to the list of responses to be merged together into one. + * Merges currently happen at once when all responses are available and {@link #getMergedResponse(Clusters)} is called. + * That may change in the future as it's possible to introduce incremental merges as responses come in if necessary. + */ + void add(SearchResponse searchResponse) { + searchResponses.add(searchResponse); + } + + /** + * Returns the merged response. To be called once all responses have been added through {@link #add(SearchResponse)} + * so that all responses are merged into a single one. + */ + SearchResponse getMergedResponse(Clusters clusters) { + assert searchResponses.size() > 1; + int totalShards = 0; + int skippedShards = 0; + int successfulShards = 0; + //the current reduce phase counts as one + int numReducePhases = 1; + List<ShardSearchFailure> failures = new ArrayList<>(); + Map<String, ProfileShardResult> profileResults = new HashMap<>(); + List<InternalAggregations> aggs = new ArrayList<>(); + Map<ShardId, Integer> shards = new TreeMap<>(); + List<TopDocs> topDocsList = new ArrayList<>(searchResponses.size()); + Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<>(); + Boolean trackTotalHits = null; + + TopDocsStats topDocsStats = new TopDocsStats(trackTotalHitsUpTo); + + for (SearchResponse searchResponse : searchResponses) { + totalShards += searchResponse.getTotalShards(); + skippedShards += searchResponse.getSkippedShards(); + successfulShards += searchResponse.getSuccessfulShards(); + numReducePhases += searchResponse.getNumReducePhases(); + + Collections.addAll(failures, searchResponse.getShardFailures()); + + profileResults.putAll(searchResponse.getProfileResults()); + + if (searchResponse.getAggregations() != null) { + InternalAggregations internalAggs = (InternalAggregations) searchResponse.getAggregations(); + aggs.add(internalAggs); + } + + Suggest suggest = searchResponse.getSuggest(); + if (suggest != null) { + for (Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> entries : suggest) { + List<Suggest.Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(entries.getName(), s -> new ArrayList<>()); + suggestionList.add(entries); + } + } + + SearchHits searchHits = searchResponse.getHits(); + + final TotalHits totalHits; + if (searchHits.getTotalHits() == null) { + //in case we didn't track total hits, we get null from each cluster, but we need a TotalHits of 0 (EQUAL_TO) for the TopDocs + totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO); + assert trackTotalHits == null || trackTotalHits == false; + trackTotalHits = false; + } else { + totalHits = searchHits.getTotalHits(); + assert trackTotalHits == null || trackTotalHits; + trackTotalHits = true; + } + TopDocs topDocs = searchHitsToTopDocs(searchHits, totalHits, shards); + topDocsStats.add(new TopDocsAndMaxScore(topDocs, searchHits.getMaxScore()),
searchResponse.isTimedOut(), searchResponse.isTerminatedEarly()); + topDocsList.add(topDocs); + } + + //after going through all the hits and collecting all their distinct shards, we can assign each shard a shardIndex and set it on the ScoreDocs + setShardIndex(shards, topDocsList); + TopDocs topDocs = mergeTopDocs(topDocsList, size, from); + SearchHits mergedSearchHits = topDocsToSearchHits(topDocs, topDocsStats); + Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); + InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, reduceContextFunction.apply(true)); + ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); + //make failures ordering consistent with ordinary search and CCS + Arrays.sort(shardFailures, FAILURES_COMPARATOR); + InternalSearchResponse response = new InternalSearchResponse(mergedSearchHits, reducedAggs, suggest, + new SearchProfileShardResults(profileResults), topDocsStats.timedOut, topDocsStats.terminatedEarly, numReducePhases); + long tookInMillis = searchTimeProvider.buildTookInMillis(); + return new SearchResponse(response, null, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters); + } + + private static final Comparator<ShardSearchFailure> FAILURES_COMPARATOR = new Comparator<ShardSearchFailure>() { + @Override + public int compare(ShardSearchFailure o1, ShardSearchFailure o2) { + ShardId shardId1 = extractShardId(o1); + ShardId shardId2 = extractShardId(o2); + if (shardId1 == null && shardId2 == null) { + return 0; + } + if (shardId1 == null) { + return -1; + } + if (shardId2 == null) { + return 1; + } + return shardId1.compareTo(shardId2); + } + + private ShardId extractShardId(ShardSearchFailure failure) { + SearchShardTarget shard = failure.shard(); + if (shard != null) { + return shard.getShardId(); + } + Throwable cause = failure.getCause(); + if (cause instanceof ElasticsearchException) { + ElasticsearchException e = (ElasticsearchException) cause; + return e.getShardId(); + } + return null; + } + }; + + private static TopDocs searchHitsToTopDocs(SearchHits searchHits, TotalHits totalHits, Map<ShardId, Integer> shards) { + SearchHit[] hits = searchHits.getHits(); + ScoreDoc[] scoreDocs = new ScoreDoc[hits.length]; + final TopDocs topDocs; + if (searchHits.getSortFields() != null) { + if (searchHits.getCollapseField() != null) { + assert searchHits.getCollapseValues() != null; + topDocs = new CollapseTopFieldDocs(searchHits.getCollapseField(), totalHits, scoreDocs, + searchHits.getSortFields(), searchHits.getCollapseValues()); + } else { + topDocs = new TopFieldDocs(totalHits, scoreDocs, searchHits.getSortFields()); + } + } else { + topDocs = new TopDocs(totalHits, scoreDocs); + } + + for (int i = 0; i < hits.length; i++) { + SearchHit hit = hits[i]; + ShardId shardId = hit.getShard().getShardId(); + shards.putIfAbsent(shardId, null); + final SortField[] sortFields = searchHits.getSortFields(); + final Object[] sortValues; + if (sortFields == null) { + sortValues = null; + } else { + if (sortFields.length == 1 && sortFields[0].getType() == SortField.Type.SCORE) { + sortValues = new Object[]{hit.getScore()}; + } else { + sortValues = hit.getRawSortValues(); + } + } + scoreDocs[i] = new FieldDocAndSearchHit(hit.docId(), hit.getScore(), sortValues, hit); + } + return topDocs; + } + + private static void setShardIndex(Map<ShardId, Integer> shards, List<TopDocs> topDocsList) { + int shardIndex = 0; + for (Map.Entry<ShardId, Integer> shard : shards.entrySet()) { + shard.setValue(shardIndex++); + } + //and go through all the scoreDocs from each
cluster and set their corresponding shardIndex + for (TopDocs topDocs : topDocsList) { + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + FieldDocAndSearchHit fieldDocAndSearchHit = (FieldDocAndSearchHit) scoreDoc; + //When hits come from indices with the same name on multiple clusters and the same shard identifier, we rely on such indices + //having a different uuid across the clusters. That's how they will get a different shardIndex. + ShardId shardId = fieldDocAndSearchHit.searchHit.getShard().getShardId(); + fieldDocAndSearchHit.shardIndex = shards.get(shardId); + } + } + } + + private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topDocsStats) { + SearchHit[] searchHits = new SearchHit[topDocs.scoreDocs.length]; + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + FieldDocAndSearchHit scoreDoc = (FieldDocAndSearchHit)topDocs.scoreDocs[i]; + searchHits[i] = scoreDoc.searchHit; + } + + SortField[] sortFields = null; + String collapseField = null; + Object[] collapseValues = null; + if (topDocs instanceof TopFieldDocs) { + sortFields = ((TopFieldDocs)topDocs).fields; + if (topDocs instanceof CollapseTopFieldDocs) { + CollapseTopFieldDocs collapseTopFieldDocs = (CollapseTopFieldDocs)topDocs; + collapseField = collapseTopFieldDocs.field; + collapseValues = collapseTopFieldDocs.collapseValues; + } + } + return new SearchHits(searchHits, topDocsStats.getTotalHits(), topDocsStats.getMaxScore(), + sortFields, collapseField, collapseValues); + } + + private static final class FieldDocAndSearchHit extends FieldDoc { + private final SearchHit searchHit; + + //to simplify things, we use a FieldDoc all the time, even when only a ScoreDoc is needed, in which case fields are null. + FieldDocAndSearchHit(int doc, float score, Object[] fields, SearchHit searchHit) { + super(doc, score, fields); + this.searchHit = searchHit; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 88e2764982cb4..3f03c521df52a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -61,6 +61,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; @@ -140,7 +141,7 @@ private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, Clust * to moving backwards due to NTP and other such complexities, etc.). There are also issues with * using a relative clock for reporting real time. Thus, we simply separate these two uses.
*/ - static class SearchTimeProvider { + static final class SearchTimeProvider { private final long absoluteStartMillis; private final long relativeStartNanos; @@ -170,12 +171,8 @@ long getAbsoluteStartMillis() { return absoluteStartMillis; } - long getRelativeStartNanos() { - return relativeStartNanos; - } - - long getRelativeCurrentNanos() { - return relativeCurrentNanosProvider.getAsLong(); + long buildTookInMillis() { + return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java deleted file mode 100644 index cc1c77c88477c..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster.coordination; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - -/** - * Exception thrown if trying to discover nodes in order to perform cluster bootstrapping, but a cluster is formed before all the required - * nodes are discovered.
- */ -public class ClusterAlreadyBootstrappedException extends ElasticsearchException { - public ClusterAlreadyBootstrappedException() { - super("node has already joined a bootstrapped cluster, bootstrapping is not required"); - } - - public ClusterAlreadyBootstrappedException(StreamInput in) throws IOException { - super(in); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index fc3f4493104fc..d21c54c03e4e5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -21,56 +21,73 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesResponse; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.util.Collections; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; public class ClusterBootstrapService { - private static final Logger logger = LogManager.getLogger(ClusterBootstrapService.class); - public static final Setting<List<String>> INITIAL_MASTER_NODES_SETTING = - Setting.listSetting("cluster.initial_master_nodes", Collections.emptyList(), Function.identity(), Property.NodeScope); + Setting.listSetting("cluster.initial_master_nodes", emptyList(), Function.identity(), Property.NodeScope); public static final
Setting<TimeValue> UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING = Setting.timeSetting("discovery.unconfigured_bootstrap_timeout", TimeValue.timeValueSeconds(3), TimeValue.timeValueMillis(1), Property.NodeScope); - private final List<String> initialMasterNodes; - @Nullable + static final String BOOTSTRAP_PLACEHOLDER_PREFIX = "{bootstrap-placeholder}-"; + + private static final Logger logger = LogManager.getLogger(ClusterBootstrapService.class); + private final Set<String> bootstrapRequirements; + @Nullable // null if discoveryIsConfigured() private final TimeValue unconfiguredBootstrapTimeout; private final TransportService transportService; - private volatile boolean running; + private final Supplier<Iterable<DiscoveryNode>> discoveredNodesSupplier; + private final BooleanSupplier isBootstrappedSupplier; + private final Consumer<VotingConfiguration> votingConfigurationConsumer; + private final AtomicBoolean bootstrappingPermitted = new AtomicBoolean(true); + + public ClusterBootstrapService(Settings settings, TransportService transportService, + Supplier<Iterable<DiscoveryNode>> discoveredNodesSupplier, BooleanSupplier isBootstrappedSupplier, + Consumer<VotingConfiguration> votingConfigurationConsumer) { + + final List<String> initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); + if (bootstrapRequirements.size() != initialMasterNodes.size()) { + throw new IllegalArgumentException( + "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + } - public ClusterBootstrapService(Settings settings, TransportService transportService) { - initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); this.transportService = transportService; + this.discoveredNodesSupplier = discoveredNodesSupplier; + this.isBootstrappedSupplier = isBootstrappedSupplier; + this.votingConfigurationConsumer = votingConfigurationConsumer; } public static boolean discoveryIsConfigured(Settings settings) { @@ -78,157 +95,135 @@ public static boolean discoveryIsConfigured(Settings settings) { .anyMatch(s -> s.exists(settings)); } - public void start() { - assert running == false; - running = true; + void onFoundPeersUpdated() { + final Set<DiscoveryNode> nodes = getDiscoveredNodes(); + if (transportService.getLocalNode().isMasterNode() && bootstrapRequirements.isEmpty() == false + && isBootstrappedSupplier.getAsBoolean() == false && nodes.stream().noneMatch(Coordinator::isZen1Node)) { + + final Tuple<Set<DiscoveryNode>,List<String>> requirementMatchingResult; + try { + requirementMatchingResult = checkRequirements(nodes); + } catch (IllegalStateException e) { + logger.warn("bootstrapping cancelled", e); + bootstrappingPermitted.set(false); + return; + } + + final Set<DiscoveryNode> nodesMatchingRequirements = requirementMatchingResult.v1(); + final List<String> unsatisfiedRequirements = requirementMatchingResult.v2(); + logger.trace("nodesMatchingRequirements={}, unsatisfiedRequirements={}, bootstrapRequirements={}", + nodesMatchingRequirements, unsatisfiedRequirements, bootstrapRequirements); + + if (nodesMatchingRequirements.size() * 2 > bootstrapRequirements.size()) { + startBootstrap(nodesMatchingRequirements, unsatisfiedRequirements); + } + } + } + + void scheduleUnconfiguredBootstrap() { + if (unconfiguredBootstrapTimeout == null) { + return; + } if (transportService.getLocalNode().isMasterNode() == false) { return; } - if (unconfiguredBootstrapTimeout != null) { - logger.info("no discovery configuration found, will perform best-effort
cluster bootstrapping after [{}] " + - "unless existing master is discovered", unconfiguredBootstrapTimeout); - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + logger.info("no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] " + + "unless existing master is discovered", unconfiguredBootstrapTimeout); + + transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.GENERIC, new Runnable() { + @Override + public void run() { + final Set discoveredNodes = getDiscoveredNodes(); + final List zen1Nodes = discoveredNodes.stream().filter(Coordinator::isZen1Node).collect(Collectors.toList()); + if (zen1Nodes.isEmpty()) { + logger.debug("performing best-effort cluster bootstrapping with {}", discoveredNodes); + startBootstrap(discoveredNodes, emptyList()); + } else { + logger.info("avoiding best-effort cluster bootstrapping due to discovery of pre-7.0 nodes {}", zen1Nodes); + } + } + + @Override + public String toString() { + return "unconfigured-discovery delayed bootstrap"; + } + }); + } + + private Set getDiscoveredNodes() { + return Stream.concat(Stream.of(transportService.getLocalNode()), + StreamSupport.stream(discoveredNodesSupplier.get().spliterator(), false)).collect(Collectors.toSet()); + } + + private void startBootstrap(Set discoveryNodes, List unsatisfiedRequirements) { + assert discoveryNodes.stream().allMatch(DiscoveryNode::isMasterNode) : discoveryNodes; + assert discoveryNodes.stream().noneMatch(Coordinator::isZen1Node) : discoveryNodes; + assert unsatisfiedRequirements.size() < discoveryNodes.size() : discoveryNodes + " smaller than " + unsatisfiedRequirements; + if (bootstrappingPermitted.compareAndSet(true, false)) { + doBootstrap(new VotingConfiguration(Stream.concat(discoveryNodes.stream().map(DiscoveryNode::getId), + unsatisfiedRequirements.stream().map(s -> BOOTSTRAP_PLACEHOLDER_PREFIX + s)) + .collect(Collectors.toSet()))); + } + } + + public static boolean isBootstrapPlaceholder(String nodeId) { + return nodeId.startsWith(BOOTSTRAP_PLACEHOLDER_PREFIX); + } + + private void doBootstrap(VotingConfiguration votingConfiguration) { + assert transportService.getLocalNode().isMasterNode(); - transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.SAME, new Runnable() { + try { + votingConfigurationConsumer.accept(votingConfiguration); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("exception when bootstrapping with {}, rescheduling", votingConfiguration), e); + transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.GENERIC, + new Runnable() { @Override public void run() { - // TODO: remove the following line once schedule method properly preserves thread context - threadContext.markAsSystemContext(); - final GetDiscoveredNodesRequest request = new GetDiscoveredNodesRequest(); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), GetDiscoveredNodesAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - logger.debug("discovered {}, starting to bootstrap", response.getNodes()); - awaitBootstrap(response.getBootstrapConfiguration()); - } - - @Override - public void handleException(TransportException exp) { - final Throwable 
rootCause = exp.getRootCause(); - if (rootCause instanceof ClusterAlreadyBootstrappedException) { - logger.debug(rootCause.getMessage(), rootCause); - } else { - logger.warn("discovery attempt failed", exp); - } - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - }); + doBootstrap(votingConfiguration); } @Override public String toString() { - return "unconfigured-discovery delayed bootstrap"; + return "retry of failed bootstrapping with " + votingConfiguration; } - }); - - } - } else if (initialMasterNodes.isEmpty() == false) { - logger.debug("waiting for discovery of master-eligible nodes matching {}", initialMasterNodes); - - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); - - final GetDiscoveredNodesRequest request = new GetDiscoveredNodesRequest(); - request.setRequiredNodes(initialMasterNodes); - request.setTimeout(null); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), GetDiscoveredNodesAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - assert response.getNodes().stream().allMatch(DiscoveryNode::isMasterNode); - logger.debug("discovered {}, starting to bootstrap", response.getNodes()); - awaitBootstrap(response.getBootstrapConfiguration()); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("discovery attempt failed", exp); - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - }); - } + } + ); } } - public void stop() { - running = false; + private static boolean matchesRequirement(DiscoveryNode discoveryNode, String requirement) { + return discoveryNode.getName().equals(requirement) + || discoveryNode.getAddress().toString().equals(requirement) + || discoveryNode.getAddress().getAddress().equals(requirement); } - private void awaitBootstrap(final BootstrapConfiguration bootstrapConfiguration) { - if (running == false) { - logger.debug("awaitBootstrap: not running"); - return; - } + private Tuple,List> checkRequirements(Set nodes) { + final Set selectedNodes = new HashSet<>(); + final List unmatchedRequirements = new ArrayList<>(); + for (final String bootstrapRequirement : bootstrapRequirements) { + final Set matchingNodes + = nodes.stream().filter(n -> matchesRequirement(n, bootstrapRequirement)).collect(Collectors.toSet()); - BootstrapClusterRequest request = new BootstrapClusterRequest(bootstrapConfiguration); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), BootstrapClusterAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - logger.debug("automatic cluster bootstrapping successful: received {}", response); - } + if (matchingNodes.size() == 0) { + unmatchedRequirements.add(bootstrapRequirement); + } - @Override - public void handleException(TransportException exp) { - // log a warning since a failure here indicates a bad problem, such as: - // - bootstrap configuration resolution failed (e.g. 
discovered nodes no longer match those in the bootstrap config) - // - discovered nodes no longer form a quorum in the bootstrap config - logger.warn(new ParameterizedMessage("automatic cluster bootstrapping failed, retrying [{}]", - bootstrapConfiguration.getNodeDescriptions()), exp); - - // There's not really much else we can do apart from retry and hope that the problem goes away. The retry is delayed - // since a tight loop here is unlikely to help. - transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.SAME, new Runnable() { - @Override - public void run() { - // TODO: remove the following line once schedule method properly preserves thread context - transportService.getThreadPool().getThreadContext().markAsSystemContext(); - awaitBootstrap(bootstrapConfiguration); - } - - @Override - public String toString() { - return "retry bootstrapping with " + bootstrapConfiguration.getNodeDescriptions(); - } - }); - } + if (matchingNodes.size() > 1) { + throw new IllegalStateException("requirement [" + bootstrapRequirement + "] matches multiple nodes: " + matchingNodes); + } - @Override - public String executor() { - return Names.SAME; + for (final DiscoveryNode matchingNode : matchingNodes) { + if (selectedNodes.add(matchingNode) == false) { + throw new IllegalStateException("node [" + matchingNode + "] matches multiple requirements: " + + bootstrapRequirements.stream().filter(r -> matchesRequirement(matchingNode, r)).collect(Collectors.toList())); } + } + } - @Override - public BootstrapClusterResponse read(StreamInput in) throws IOException { - return new BootstrapClusterResponse(in); - } - }); + return Tuple.tuple(selectedNodes, unmatchedRequirements); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 9fc408fc9479c..cc58628b53893 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -34,6 +34,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -188,13 +189,22 @@ String getDescription() { private String describeQuorum(VotingConfiguration votingConfiguration) { final Set nodeIds = votingConfiguration.getNodeIds(); assert nodeIds.isEmpty() == false; + final int requiredNodes = nodeIds.size() / 2 + 1; + + final Set realNodeIds = new HashSet<>(nodeIds); + realNodeIds.removeIf(ClusterBootstrapService::isBootstrapPlaceholder); + assert requiredNodes <= realNodeIds.size() : nodeIds; if (nodeIds.size() == 1) { - return "a node with id " + nodeIds; + return "a node with id " + realNodeIds; } else if (nodeIds.size() == 2) { - return "two nodes with ids " + nodeIds; + return "two nodes with ids " + realNodeIds; } else { - return "at least " + (nodeIds.size() / 2 + 1) + " nodes with ids from " + nodeIds; + if (requiredNodes < realNodeIds.size()) { + return "at least " + requiredNodes + " nodes with ids from " + realNodeIds; + } else { + return requiredNodes + " nodes with ids " + realNodeIds; + } } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 
72fe2e081de74..084d5cf38f2db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -83,7 +82,6 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; import static org.elasticsearch.discovery.DiscoverySettings.NO_MASTER_BLOCK_ID; import static org.elasticsearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -139,8 +137,6 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private JoinHelper.JoinAccumulator joinAccumulator; private Optional currentPublication = Optional.empty(); - private final Set>> discoveredNodesListeners = newConcurrentSet(); - public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSettings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, MasterService masterService, Supplier persistedStateSupplier, UnicastHostsProvider unicastHostsProvider, @@ -170,7 +166,8 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.clusterApplier = clusterApplier; masterService.setClusterStateSupplier(this::getStateForMasterService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); - this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService); + this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService, this::getFoundPeers, + this::isInitialConfigurationSet, this::setInitialConfiguration); this.discoveryUpgradeService = new DiscoveryUpgradeService(settings, clusterSettings, transportService, this::isInitialConfigurationSet, joinHelper, peerFinder::getFoundPeers, this::setInitialConfiguration); this.lagDetector = new LagDetector(settings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), @@ -296,12 +293,6 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { becomeFollower("handlePublishRequest", sourceNode); // also updates preVoteCollector } - if (isInitialConfigurationSet()) { - for (final ActionListener> discoveredNodesListener : discoveredNodesListeners) { - discoveredNodesListener.onFailure(new ClusterAlreadyBootstrappedException()); - } - } - return new PublishWithJoinResponse(publishResponse, joinWithDestination(lastJoin, sourceNode, publishRequest.getAcceptedState().term())); } @@ -455,7 +446,7 @@ void becomeCandidate(String method) { if (mode != Mode.CANDIDATE) { mode = Mode.CANDIDATE; - cancelActivePublication(); + cancelActivePublication("become candidate: " + method); joinAccumulator.close(mode); joinAccumulator = joinHelper.new CandidateJoinAccumulator(); @@ -527,7 +518,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { discoveryUpgradeService.deactivate(); clusterFormationFailureHelper.stop(); closePrevotingAndElectionScheduler(); - cancelActivePublication(); + cancelActivePublication("become 
follower: " + method); preVoteCollector.update(getPreVoteResponse(), leaderNode); if (restartLeaderChecker) { @@ -598,16 +589,12 @@ public void startInitialJoin() { synchronized (mutex) { becomeCandidate("startInitialJoin"); } - - if (isInitialConfigurationSet() == false) { - clusterBootstrapService.start(); - } + clusterBootstrapService.scheduleUnconfiguredBootstrap(); } @Override protected void doStop() { configuredHostsResolver.stop(); - clusterBootstrapService.stop(); } @Override @@ -706,21 +693,6 @@ public boolean isInitialConfigurationSet() { return getStateForMasterService().getLastAcceptedConfiguration().isEmpty() == false; } - /** - * Sets the initial configuration by resolving the given {@link BootstrapConfiguration} to concrete nodes. This method is safe to call - * more than once, as long as each call's bootstrap configuration resolves to the same set of nodes. - * - * @param bootstrapConfiguration A description of the nodes that should form the initial configuration. - * @return whether this call successfully set the initial configuration - if false, the cluster has already been bootstrapped. - */ - public boolean setInitialConfiguration(final BootstrapConfiguration bootstrapConfiguration) { - final List selfAndDiscoveredPeers = new ArrayList<>(); - selfAndDiscoveredPeers.add(getLocalNode()); - getFoundPeers().forEach(selfAndDiscoveredPeers::add); - final VotingConfiguration votingConfiguration = bootstrapConfiguration.resolve(selfAndDiscoveredPeers); - return setInitialConfiguration(votingConfiguration); - } - /** * Sets the initial configuration to the given {@link VotingConfiguration}. This method is safe to call * more than once, as long as the argument to each call is the same. @@ -733,13 +705,10 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura final ClusterState currentState = getStateForMasterService(); if (isInitialConfigurationSet()) { + logger.debug("initial configuration already set, ignoring {}", votingConfiguration); return false; } - if (mode != Mode.CANDIDATE) { - throw new CoordinationStateRejectedException("Cannot set initial configuration in mode " + mode); - } - final List knownNodes = new ArrayList<>(); knownNodes.add(getLocalNode()); peerFinder.getFoundPeers().forEach(knownNodes::add); @@ -899,8 +868,6 @@ private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { public void publish(ClusterChangedEvent clusterChangedEvent, ActionListener publishListener, AckListener ackListener) { try { synchronized (mutex) { - assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; - if (mode != Mode.LEADER) { logger.debug(() -> new ParameterizedMessage("[{}] failed publication as not currently leading", clusterChangedEvent.source())); @@ -935,7 +902,7 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) @Override public void run() { synchronized (mutex) { - publication.onTimeout(); + publication.cancel("timed out after " + publishTimeout); } } @@ -991,10 +958,10 @@ public void onFailure(Exception e) { }; } - private void cancelActivePublication() { + private void cancelActivePublication(String reason) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; if (currentPublication.isPresent()) { - currentPublication.get().onTimeout(); + currentPublication.get().cancel(reason); } } @@ -1019,9 +986,8 @@ protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { @Override protected void onFoundPeersUpdated() { - final Iterable foundPeers; synchronized (mutex) 
{ - foundPeers = getFoundPeers(); + final Iterable foundPeers = getFoundPeers(); if (mode == Mode.CANDIDATE) { final CoordinationState.VoteCollection expectedVotes = new CoordinationState.VoteCollection(); foundPeers.forEach(expectedVotes::addVote); @@ -1039,9 +1005,7 @@ protected void onFoundPeersUpdated() { } } - for (final ActionListener> discoveredNodesListener : discoveredNodesListeners) { - discoveredNodesListener.onResponse(foundPeers); - } + clusterBootstrapService.onFoundPeersUpdated(); } } @@ -1076,14 +1040,6 @@ public String toString() { }); } - public Releasable withDiscoveryListener(ActionListener> listener) { - discoveredNodesListeners.add(listener); - return () -> { - boolean removed = discoveredNodesListeners.remove(listener); - assert removed : listener; - }; - } - public Iterable getFoundPeers() { // TODO everyone takes this and adds the local node. Maybe just add the local node here? return peerFinder.getFoundPeers(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index b7f99df31c018..641f3941cb3bd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -135,7 +135,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.keySet().removeIf(isUnknownNode); faultyNodes.removeIf(isUnknownNode); - for (final DiscoveryNode discoveryNode : discoveryNodes) { + discoveryNodes.mastersFirstStream().forEach(discoveryNode -> { if (discoveryNode.equals(discoveryNodes.getLocalNode()) == false && followerCheckers.containsKey(discoveryNode) == false && faultyNodes.contains(discoveryNode) == false) { @@ -144,7 +144,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.put(discoveryNode, followerChecker); followerChecker.start(); } - } + }); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index c4c76d8a8fe74..82eb26d98b97e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.ArrayList; import java.util.Collection; @@ -187,6 +188,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState .blocks(currentState.blocks()) .removeGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)).build(); logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); + tmpState = PersistentTasksCustomMetaData.deassociateDeadNodes(tmpState); return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index 58d4b04444b2f..9d6051d0ccf03 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.List; @@ -91,8 +92,9 @@ public ClusterTasksResult execute(final ClusterState currentState, final L protected ClusterTasksResult getTaskClusterTasksResult(ClusterState currentState, List tasks, ClusterState remainingNodesClusterState) { + ClusterState ptasksDeassociatedState = PersistentTasksCustomMetaData.deassociateDeadNodes(remainingNodesClusterState); final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); - return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks))); + return resultBuilder.build(allocationService.deassociateDeadNodes(ptasksDeassociatedState, true, describeTasks(tasks))); } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 9ec8d562b81ba..4aea820d6d9e0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -49,7 +49,7 @@ public abstract class Publication { private Optional applyCommitRequest; // set when state is committed private boolean isCompleted; // set when publication is completed - private boolean timedOut; // set when publication timed out + private boolean cancelled; // set when publication is cancelled public Publication(PublishRequest publishRequest, AckListener ackListener, LongSupplier currentTimeSupplier) { this.publishRequest = publishRequest; @@ -58,7 +58,7 @@ public Publication(PublishRequest publishRequest, AckListener ackListener, LongS startTime = currentTimeSupplier.getAsLong(); applyCommitRequest = Optional.empty(); publicationTargets = new ArrayList<>(publishRequest.getAcceptedState().getNodes().getNodes().size()); - publishRequest.getAcceptedState().getNodes().iterator().forEachRemaining(n -> publicationTargets.add(new PublicationTarget(n))); + publishRequest.getAcceptedState().getNodes().mastersFirstStream().forEach(n -> publicationTargets.add(new PublicationTarget(n))); } public void start(Set faultyNodes) { @@ -71,17 +71,17 @@ public void start(Set faultyNodes) { publicationTargets.forEach(PublicationTarget::sendPublishRequest); } - public void onTimeout() { + public void cancel(String reason) { if (isCompleted) { return; } - assert timedOut == false; - timedOut = true; + assert cancelled == false; + cancelled = true; if (applyCommitRequest.isPresent() == false) { - logger.debug("onTimeout: [{}] timed out before committing", this); + logger.debug("cancel: [{}] cancelled before committing (reason: {})", this, reason); // fail all current publications - final Exception e = new ElasticsearchException("publication timed out before committing"); + final Exception e = new ElasticsearchException("publication cancelled before committing: " + reason); publicationTargets.stream().filter(PublicationTarget::isActive).forEach(pt -> pt.setFailed(e)); } onPossibleCompletion(); @@ -101,7 +101,7 @@ private void onPossibleCompletion() { return; } - if (timedOut == false) { + if (cancelled == false) { for (final 
PublicationTarget target : publicationTargets) { if (target.isActive()) { return; @@ -125,8 +125,8 @@ private void onPossibleCompletion() { } // For assertions only: verify that this invariant holds - private boolean publicationCompletedIffAllTargetsInactiveOrTimedOut() { - if (timedOut == false) { + private boolean publicationCompletedIffAllTargetsInactiveOrCancelled() { + if (cancelled == false) { for (final PublicationTarget target : publicationTargets) { if (target.isActive()) { return isCompleted == false; @@ -222,7 +222,7 @@ void sendPublishRequest() { state = PublicationTargetState.SENT_PUBLISH_REQUEST; Publication.this.sendPublishRequest(discoveryNode, publishRequest, new PublishResponseHandler()); // TODO Can this ^ fail with an exception? Target should be failed if so. - assert publicationCompletedIffAllTargetsInactiveOrTimedOut(); + assert publicationCompletedIffAllTargetsInactiveOrCancelled(); } void handlePublishResponse(PublishResponse publishResponse) { @@ -245,7 +245,7 @@ void sendApplyCommit() { state = PublicationTargetState.SENT_APPLY_COMMIT; assert applyCommitRequest.isPresent(); Publication.this.sendApplyCommit(discoveryNode, applyCommitRequest.get(), new ApplyCommitResponseHandler()); - assert publicationCompletedIffAllTargetsInactiveOrTimedOut(); + assert publicationCompletedIffAllTargetsInactiveOrCancelled(); } void setAppliedCommit() { @@ -300,7 +300,7 @@ private class PublishResponseHandler implements ActionListener getCoordinatingOnlyNodes() { return nodes.build(); } + /** + * Returns a stream of all nodes, with master nodes at the front + */ + public Stream mastersFirstStream() { + return Stream.concat(StreamSupport.stream(masterNodes.spliterator(), false).map(cur -> cur.value), + StreamSupport.stream(this.spliterator(), false).filter(n -> n.isMasterNode() == false)); + } + /** * Get a node by its id * diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 59f43a193ddc8..7acf20185eedd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -425,7 +425,7 @@ private void deassociateDeadNodes(RoutingAllocation allocation) { for (ShardRouting shardRouting : node.copyShards()) { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0; - UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left [" + node.nodeId() + "]", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT); allocation.routingNodes().failShard(logger, shardRouting, unassignedInfo, indexMetaData, allocation.changes()); } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index e89317ad288c0..8d83aa30b3587 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -133,9 +133,11 @@ static DateFormatter forPattern(String input) { return Joda.forPattern(input); } - // force 
java 8 date format + // dates starting with 8 will not be using joda but java time formatters + input = input.substring(1); + List formatters = new ArrayList<>(); - for (String pattern : Strings.delimitedListToStringArray(input.substring(1), "||")) { + for (String pattern : Strings.delimitedListToStringArray(input, "||")) { if (Strings.hasLength(pattern) == false) { throw new IllegalArgumentException("Cannot have empty element in multi date format pattern: " + input); } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index d3bf5eb2a641c..de75356a58995 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -46,7 +46,6 @@ import static java.time.temporal.ChronoField.DAY_OF_WEEK; import static java.time.temporal.ChronoField.DAY_OF_YEAR; import static java.time.temporal.ChronoField.HOUR_OF_DAY; -import static java.time.temporal.ChronoField.MILLI_OF_SECOND; import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; import static java.time.temporal.ChronoField.MONTH_OF_YEAR; import static java.time.temporal.ChronoField.NANO_OF_SECOND; @@ -90,7 +89,7 @@ public class DateFormatters { .appendLiteral('T') .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) .optionalStart() - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .optionalEnd() .optionalStart() .appendZoneOrOffsetId() @@ -159,14 +158,14 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter BASIC_TIME_PRINTER = new DateTimeFormatterBuilder() .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .toFormatter(Locale.ROOT); /* @@ -372,7 +371,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .appendZoneOrOffsetId() .toFormatter(Locale.ROOT), new DateTimeFormatterBuilder() @@ -381,7 +380,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .append(TIME_ZONE_FORMATTER_NO_COLON) .toFormatter(Locale.ROOT) ); @@ -438,7 +437,7 @@ public class DateFormatters { .appendLiteral('T') .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) .optionalStart() - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -495,12 +494,12 @@ public class DateFormatters { // NOTE: this is not a strict formatter to retain the joda time based behaviour, even though it's named like this private static final DateTimeFormatter 
STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER = new DateTimeFormatterBuilder() .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER = new DateTimeFormatterBuilder() .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .toFormatter(Locale.ROOT); /* @@ -534,7 +533,7 @@ public class DateFormatters { .appendLiteral("T") .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT) ); @@ -562,7 +561,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -586,7 +585,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_TIME_PRINTER = new DateTimeFormatterBuilder() @@ -595,7 +594,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 3, true) .toFormatter(Locale.ROOT); /* @@ -819,7 +818,7 @@ public class DateFormatters { .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) .optionalEnd() .optionalStart() - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .optionalEnd() .optionalStart().appendZoneOrOffsetId().optionalEnd() .optionalStart().appendOffset("+HHmm", "Z").optionalEnd() @@ -840,7 +839,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 1, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter ORDINAL_DATE_FORMATTER = new DateTimeFormatterBuilder() @@ -875,7 +874,7 @@ public class DateFormatters { private static final DateTimeFormatter TIME_PREFIX = new DateTimeFormatterBuilder() .append(TIME_NO_MILLIS_FORMATTER) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter WEEK_DATE_FORMATTER = new DateTimeFormatterBuilder() @@ -963,7 +962,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -1068,7 +1067,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(MILLI_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 3, true) .optionalEnd() .toFormatter(Locale.ROOT); 
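// [Editor's sketch, not part of the patch] The hunks above swap MILLI_OF_SECOND for
// NANO_OF_SECOND in appendFraction(...). A minimal JDK-only illustration of why the
// textual format stays identical while the parsed digits now land, properly scaled,
// in the nanosecond field:
import java.time.LocalTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;
import java.util.Locale;

class FractionOfSecondSketch {
    public static void main(String[] args) {
        DateTimeFormatter f = new DateTimeFormatterBuilder()
            .appendValue(ChronoField.HOUR_OF_DAY, 2)
            .appendValue(ChronoField.MINUTE_OF_HOUR, 2)
            .appendValue(ChronoField.SECOND_OF_MINUTE, 2)
            // same 1-3 digit fraction as before; parsed digits are scaled into nanos
            .appendFraction(ChronoField.NANO_OF_SECOND, 1, 3, true)
            .toFormatter(Locale.ROOT);
        LocalTime time = LocalTime.parse("123456.123", f);
        System.out.println(time.getNano()); // 123000000
    }
}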
@@ -1453,18 +1452,21 @@ static JavaDateFormatter merge(String pattern, List formatters) { assert formatters.size() > 0; List dateTimeFormatters = new ArrayList<>(formatters.size()); + DateTimeFormatterBuilder roundupBuilder = new DateTimeFormatterBuilder(); DateTimeFormatter printer = null; for (DateFormatter formatter : formatters) { assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; - DateTimeFormatter dateTimeFormatter = javaDateFormatter.getParser(); if (printer == null) { printer = javaDateFormatter.getPrinter(); } - dateTimeFormatters.add(dateTimeFormatter); + dateTimeFormatters.add(javaDateFormatter.getParser()); + roundupBuilder.appendOptional(javaDateFormatter.getRoundupParser()); } + DateTimeFormatter roundUpParser = roundupBuilder.toFormatter(Locale.ROOT); - return new JavaDateFormatter(pattern, printer, dateTimeFormatters.toArray(new DateTimeFormatter[0])); + return new JavaDateFormatter(pattern, printer, builder -> builder.append(roundUpParser), + dateTimeFormatters.toArray(new DateTimeFormatter[0])); } private static final ZonedDateTime EPOCH_ZONED_DATE_TIME = Instant.EPOCH.atZone(ZoneOffset.UTC); diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index 518957cd2eb9d..7e0f17c5f6d9c 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -153,9 +153,11 @@ public long getFrom(TemporalAccessor temporal) { .toFormatter(Locale.ROOT); static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter("epoch_second", SECONDS_FORMATTER3, + builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), SECONDS_FORMATTER1, SECONDS_FORMATTER2, SECONDS_FORMATTER3); static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter("epoch_millis", MILLISECONDS_FORMATTER3, + builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); private abstract static class EpochField implements TemporalField { diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index 0fce14b764ef1..20ef593a32610 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -32,6 +32,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.function.Consumer; class JavaDateFormatter implements DateFormatter { @@ -43,14 +44,27 @@ class JavaDateFormatter implements DateFormatter { ROUND_UP_BASE_FIELDS.put(ChronoField.HOUR_OF_DAY, 23L); ROUND_UP_BASE_FIELDS.put(ChronoField.MINUTE_OF_HOUR, 59L); ROUND_UP_BASE_FIELDS.put(ChronoField.SECOND_OF_MINUTE, 59L); - ROUND_UP_BASE_FIELDS.put(ChronoField.MILLI_OF_SECOND, 999L); + ROUND_UP_BASE_FIELDS.put(ChronoField.NANO_OF_SECOND, 999_999_999L); } private final String format; private final DateTimeFormatter printer; private final DateTimeFormatter parser; + private final DateTimeFormatter roundupParser; + + private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter roundupParser, DateTimeFormatter parser) { + this.format = format; + this.printer = printer; + this.roundupParser = roundupParser; + this.parser = parser; + } 
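// [Editor's sketch, not part of the patch] The merge(...) and EpochTime changes above
// build a dedicated "round up" parser by defaulting absent fields to their maximum
// values. A hedged, JDK-only sketch of that parseDefaulting idea (class name and
// pattern are illustrative, not the ES implementation):
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;
import java.util.Locale;

class RoundUpParserSketch {
    public static void main(String[] args) {
        DateTimeFormatter roundUp = new DateTimeFormatterBuilder()
            .append(DateTimeFormatter.ISO_LOCAL_DATE)
            // any field the input omits is defaulted to the end of its interval
            .parseDefaulting(ChronoField.HOUR_OF_DAY, 23L)
            .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 59L)
            .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 59L)
            .parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L)
            .toFormatter(Locale.ROOT);
        // a bare date resolves to the last representable instant of that day
        System.out.println(LocalDateTime.parse("2019-01-01", roundUp));
        // -> 2019-01-01T23:59:59.999999999
    }
}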
JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { + this(format, printer, builder -> ROUND_UP_BASE_FIELDS.forEach(builder::parseDefaulting), parsers); + } + + JavaDateFormatter(String format, DateTimeFormatter printer, Consumer roundupParserConsumer, + DateTimeFormatter... parsers) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } @@ -75,6 +89,19 @@ class JavaDateFormatter implements DateFormatter { } this.format = format; this.printer = printer; + + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); + builder.append(this.parser); + roundupParserConsumer.accept(builder); + DateTimeFormatter roundupFormatter = builder.toFormatter(parser.getLocale()); + if (printer.getZone() != null) { + roundupFormatter = roundupFormatter.withZone(printer.getZone()); + } + this.roundupParser = roundupFormatter; + } + + DateTimeFormatter getRoundupParser() { + return roundupParser; } DateTimeFormatter getParser() { @@ -100,7 +127,7 @@ public DateFormatter withZone(ZoneId zoneId) { return this; } - return new JavaDateFormatter(format, printer.withZone(zoneId), parser.withZone(zoneId)); + return new JavaDateFormatter(format, printer.withZone(zoneId), roundupParser.withZone(zoneId), parser.withZone(zoneId)); } @Override @@ -110,7 +137,7 @@ public DateFormatter withLocale(Locale locale) { return this; } - return new JavaDateFormatter(format, printer.withLocale(locale), parser.withLocale(locale)); + return new JavaDateFormatter(format, printer.withLocale(locale), roundupParser.withLocale(locale), parser.withLocale(locale)); } @Override @@ -123,12 +150,6 @@ public String pattern() { return format; } - JavaDateFormatter parseDefaulting(Map fields) { - final DateTimeFormatterBuilder parseDefaultingBuilder = new DateTimeFormatterBuilder().append(printer); - fields.forEach(parseDefaultingBuilder::parseDefaulting); - return new JavaDateFormatter(format, parseDefaultingBuilder.toFormatter(Locale.ROOT)); - } - @Override public Locale locale() { return this.printer.getLocale(); @@ -141,7 +162,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(this, this.parseDefaulting(ROUND_UP_BASE_FIELDS)); + return new JavaDateMathParser(parser, roundupParser); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 2d3fb7e176e89..3c9a1615a6c01 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.time; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; import java.time.DateTimeException; import java.time.DayOfWeek; @@ -28,6 +29,7 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalAdjusters; @@ -44,12 +46,10 @@ */ public class JavaDateMathParser implements DateMathParser { + private final DateTimeFormatter formatter; + private final DateTimeFormatter roundUpFormatter; - - private final DateFormatter formatter; - private final DateFormatter roundUpFormatter; - - public JavaDateMathParser(DateFormatter formatter, DateFormatter roundUpFormatter) { + 
public JavaDateMathParser(DateTimeFormatter formatter, DateTimeFormatter roundUpFormatter) { Objects.requireNonNull(formatter); this.formatter = formatter; this.roundUpFormatter = roundUpFormatter; @@ -208,7 +208,11 @@ private long parseMath(final String mathString, final long time, final boolean r } private long parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTime) { - DateFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; + if (Strings.isNullOrEmpty(value)) { + throw new IllegalArgumentException("cannot parse empty date"); + } + + DateTimeFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; try { if (timeZone == null) { return DateFormatters.toZonedDateTime(formatter.parse(value)).toInstant().toEpochMilli(); diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 2d236b08f79de..2f676eb846770 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -59,6 +59,7 @@ import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.node.Node; import java.io.Closeable; import java.io.IOException; @@ -309,6 +310,11 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { ensureAtomicMoveSupported(nodePaths); } + + if (DiscoveryNode.isDataNode(settings) == false) { + ensureNoShardData(nodePaths); + } + success = true; } finally { if (success == false) { @@ -1030,6 +1036,38 @@ private static void ensureAtomicMoveSupported(final NodePath[] nodePaths) throws } } + private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { + List shardDataPaths = new ArrayList<>(); + for (NodePath nodePath : nodePaths) { + Path indicesPath = nodePath.indicesPath; + if (Files.isDirectory(indicesPath)) { + try (DirectoryStream indexStream = Files.newDirectoryStream(indicesPath)) { + for (Path indexPath : indexStream) { + if (Files.isDirectory(indexPath)) { + try (Stream shardStream = Files.list(indexPath)) { + shardStream.filter(this::isShardPath) + .map(Path::toAbsolutePath) + .forEach(shardDataPaths::add); + } + } + } + } + } + } + + if (shardDataPaths.isEmpty() == false) { + throw new IllegalStateException("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data: " + + shardDataPaths); + } + } + + private boolean isShardPath(Path path) { + return Files.isDirectory(path) + && path.getFileName().toString().chars().allMatch(Character::isDigit); + } + /** * Resolve the custom path for an index's shard. * Uses the {@code IndexMetaData.SETTING_DATA_PATH} setting to determine @@ -1140,3 +1178,4 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } + diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 92f305cb67a1f..b79a5e77309b9 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -278,16 +278,6 @@ protected void onServerException(HttpServerChannel channel, Exception e) { logger.error(new ParameterizedMessage("exception from http server channel caught on transport layer [channel={}]", channel), e); } - /** - * Exception handler for exceptions that are not associated with a specific channel. - * - * @param exception the exception - */ - protected void onNonChannelException(Exception exception) { - String threadName = Thread.currentThread().getName(); - logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", threadName), exception); - } - protected void serverAcceptedChannel(HttpChannel httpChannel) { boolean addedOnThisCall = httpChannels.add(httpChannel); assert addedOnThisCall : "Channel should only be added to http channel set once"; diff --git a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java index 0ce8edcf3b662..de345a39fd6d9 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java @@ -29,8 +29,6 @@ public interface HttpServerTransport extends LifecycleComponent { String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; - String HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX = "http_server_acceptor"; - BoundTransportAddress boundAddress(); HttpInfo info(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index c926a5a47191e..5c09708b62cae 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -30,6 +30,8 @@ import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.elasticsearch.Assertions; +import org.elasticsearch.Version; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.core.internal.io.IOUtils; @@ -98,7 +100,25 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null; this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; - this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; + if (seqNoStats == null) { + seqNoStats = buildSeqNoStats(lastCommittedSegmentInfos); + // During a peer-recovery the global checkpoint is not necessarily known or up to date when the engine + // is created, so we only check the max seq no / global checkpoint coherency when the global + // checkpoint is different from the unassigned sequence number value. 
+ // In addition to that we only execute the check if the index the engine belongs to has been + // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction + // that guarantee that all operations have been flushed to Lucene. + final long globalCheckpoint = engineConfig.getGlobalCheckpointSupplier().getAsLong(); + if (globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO + && engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_7_0)) { + if (seqNoStats.getMaxSeqNo() != globalCheckpoint) { + assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), globalCheckpoint); + throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo() + + "] from last commit does not match global checkpoint [" + globalCheckpoint + "]"); + } + } + } + this.seqNoStats = seqNoStats; this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory); reader = open(indexCommit); reader = wrapReader(reader, readerWrapperFunction); @@ -116,6 +136,12 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats } } + protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) { + if (Assertions.ENABLED) { + assert false : "max seq. no. [" + maxSeqNo + "] does not match [" + globalCheckpoint + "]"; + } + } + protected final DirectoryReader wrapReader(DirectoryReader reader, Function readerWrapperFunction) throws IOException { reader = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 79b2b0c4c67d1..32c44fd5f55a0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -73,21 +73,14 @@ public static class Defaults { } } - public static class Builder extends MetadataFieldMapper.Builder { + private static class Builder extends MetadataFieldMapper.Builder { private boolean enabled = Defaults.ENABLED; - public Builder(MappedFieldType existing) { + private Builder(MappedFieldType existing) { super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } - @Override - @Deprecated - public Builder index(boolean index) { - enabled(index); - return super.index(index); - } - - public Builder enabled(boolean enabled) { + private Builder enabled(boolean enabled) { this.enabled = enabled; return this; } diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 7f42eb137190d..b39f2ab5a91e6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -275,6 +275,11 @@ public MatchesIterator matches(String field, LeafReaderContext ctx, int doc) { return null; } + @Override + public int minExtent() { + return 0; + } + @Override public void extractTerms(String field, Set terms) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 3a48702fd552b..bc2196501fb8e 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -431,13 +431,13 @@ public interface RecoveryListener { class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, - Task task) throws Exception { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId() - )) { - recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps()); + public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + final ActionListener listener = + new HandledTransportAction.ChannelActionListener<>(channel, Actions.PREPARE_TRANSLOG, request); + recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps(), + ActionListener.wrap(nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE), listener::onFailure)); } - channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 34434f50b456f..f360a68b7a83c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -197,51 +197,51 @@ public void recoverToTarget(ActionListener listener) { assert requiredSeqNoRangeStart >= startingSeqNo : "requiredSeqNoRangeStart [" + requiredSeqNoRangeStart + "] is lower than [" + startingSeqNo + "]"; - final TimeValue prepareEngineTime; - try { - // For a sequence based recovery, the target can keep its local translog - prepareEngineTime = prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, - shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); - } catch (final Exception e) { - throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); - } + final StepListener 
prepareEngineStep = new StepListener<>(); + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo), prepareEngineStep); + final StepListener sendSnapshotStep = new StepListener<>(); + prepareEngineStep.whenComplete(prepareEngineTime -> { + /* + * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. + * This means that any document indexed into the primary after this will be replicated to this replica as well + * make sure to do this before sampling the max sequence number in the next step, to ensure that we send + * all documents up to maxSeqNo in phase2. + */ + runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()), + shardId + " initiating tracking of " + request.targetAllocationId(), shard, cancellableThreads, logger); - /* - * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. - * This means that any document indexed into the primary after this will be replicated to this replica as well - * make sure to do this before sampling the max sequence number in the next step, to ensure that we send - * all documents up to maxSeqNo in phase2. - */ - runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()), - shardId + " initiating tracking of " + request.targetAllocationId(), shard, cancellableThreads, logger); - - final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); - /* - * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all - * operations in the required range will be available for replaying from the translog of the source. - */ - cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); - - if (logger.isTraceEnabled()) { - logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); - logger.trace("snapshot translog for recovery; current size is [{}]", - shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); - } + final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); + /* + * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all + * operations in the required range will be available for replaying from the translog of the source. + */ + cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); + if (logger.isTraceEnabled()) { + logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); + logger.trace("snapshot translog for recovery; current size is [{}]", + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); + } + final Translog.Snapshot phase2Snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo); + resources.add(phase2Snapshot); + // we can release the retention lock here because the snapshot itself will retain the required operations. + retentionLock.close(); + // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values + // are at least as high as the corresponding values on the primary when any of these operations were executed on it. 
+ final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); + final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); + phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, phase2Snapshot, maxSeenAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, sendSnapshotStep); + sendSnapshotStep.whenComplete( + r -> IOUtils.close(phase2Snapshot), + e -> { + IOUtils.closeWhileHandlingException(phase2Snapshot); + onFailure.accept(new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e)); + }); + + }, onFailure); - final Translog.Snapshot phase2Snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo); - resources.add(phase2Snapshot); - // we can release the retention lock here because the snapshot itself will retain the required operations. - IOUtils.close(retentionLock); - // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values - // are at least as high as the corresponding values on the primary when any of these operations were executed on it. - final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); - final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); - final StepListener sendSnapshotStep = new StepListener<>(); - phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, phase2Snapshot, maxSeenAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, sendSnapshotStep); - sendSnapshotStep.whenComplete( - r -> IOUtils.close(phase2Snapshot), - e -> onFailure.accept(new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e))); final StepListener finalizeStep = new StepListener<>(); sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, finalizeStep), onFailure); @@ -251,7 +251,7 @@ public void recoverToTarget(ActionListener listener) { final RecoveryResponse response = new RecoveryResponse(sendFileResult.phase1FileNames, sendFileResult.phase1FileSizes, sendFileResult.phase1ExistingFileNames, sendFileResult.phase1ExistingFileSizes, sendFileResult.totalSize, sendFileResult.existingTotalSize, sendFileResult.took.millis(), phase1ThrottlingWaitTime, - prepareEngineTime.millis(), sendSnapshotResult.totalOperations, sendSnapshotResult.tookTime.millis()); + prepareEngineStep.result().millis(), sendSnapshotResult.totalOperations, sendSnapshotResult.tookTime.millis()); try { wrappedListener.onResponse(response); } finally { @@ -484,16 +484,21 @@ public SendFileResult phase1(final IndexCommit snapshot, final Supplier } } - TimeValue prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { StopWatch stopWatch = new StopWatch().start(); - logger.trace("recovery [phase1]: prepare remote engine for translog"); + final ActionListener wrappedListener = ActionListener.wrap( + nullVal -> { + stopWatch.stop(); + final TimeValue tookTime = stopWatch.totalTime(); + logger.trace("recovery [phase1]: remote engine start took [{}]", tookTime); + listener.onResponse(tookTime); + }, + e -> listener.onFailure(new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e))); // Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) of tombstone deletes. 
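// [Editor's sketch, not part of the patch] The refactor above replaces a blocking
// prepare call with listener chaining: prepareEngineStep completes, then tracking
// starts and phase2 runs, feeding sendSnapshotStep. A JDK-only analogy with
// CompletableFuture standing in for ES's StepListener (all names hypothetical):
import java.util.concurrent.CompletableFuture;

class RecoveryStepsSketch {
    // pretend this sends the PREPARE_TRANSLOG request and completes asynchronously
    static CompletableFuture<Long> prepareTargetForTranslog() {
        return CompletableFuture.supplyAsync(() -> 42L /* took-time in millis */);
    }

    public static void main(String[] args) {
        prepareTargetForTranslog()
            .thenAccept(took -> System.out.println("engine ready after " + took + "ms; phase2 may start"))
            .exceptionally(e -> { System.err.println("prepare failed: " + e); return null; })
            .join();
    }
}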
- cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps)); - stopWatch.stop(); - final TimeValue tookTime = stopWatch.totalTime(); - logger.trace("recovery [phase1]: remote engine start took [{}]", tookTime); - return tookTime; + logger.trace("recovery [phase1]: prepare remote engine for translog"); + cancellableThreads.execute(() -> + recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, wrappedListener)); } /** diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 5950f71006a85..b300490542968 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -366,9 +366,12 @@ private void ensureRefCount() { /*** Implementation of {@link RecoveryTargetHandler } */ @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { - state().getTranslog().totalOperations(totalTranslogOps); - indexShard().openEngineAndSkipTranslogRecovery(); + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + state().getTranslog().totalOperations(totalTranslogOps); + indexShard().openEngineAndSkipTranslogRecovery(); + return null; + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 3372c933559c6..0fa5e94c63cd2 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -35,7 +35,7 @@ public interface RecoveryTargetHandler { * @param fileBasedRecovery whether or not this call is part of a file based recovery * @param totalTranslogOps total translog operations expected to be sent */ - void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException; + void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener); /** * The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index ba703aeee2850..0799cc6595189 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -77,11 +77,12 @@ public RemoteRecoveryTargetHandler(long recoveryId, ShardId shardId, TransportSe } @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, fileBasedRecovery), - 
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, fileBasedRecovery), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), + new ActionListenerResponseHandler<>(ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure), + in -> TransportResponse.Empty.INSTANCE, ThreadPool.Names.GENERIC)); } @Override diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index b7a179e41e381..01e3d7450a60d 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -62,6 +62,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable> tasks; @@ -119,6 +120,11 @@ public PersistentTasksCustomMetaData(long lastAllocationId, Map PersistentTask getTa return null; } + /** + * Unassign any persistent tasks executing on nodes that are no longer in + * the cluster. If the task's assignment has a non-null executor node and that + * node is no longer in the cluster then the assignment is set to + * {@link #LOST_NODE_ASSIGNMENT}. + * + * @param clusterState The cluster state + * @return The argument {@code clusterState} unchanged if no tasks were reassigned, else + * a copy with the modified tasks + */ + public static ClusterState deassociateDeadNodes(ClusterState clusterState) { + PersistentTasksCustomMetaData tasks = getPersistentTasksCustomMetaData(clusterState); + if (tasks == null) { + return clusterState; + } + + PersistentTasksCustomMetaData.Builder taskBuilder = PersistentTasksCustomMetaData.builder(tasks); + for (PersistentTask task : tasks.tasks()) { + if (task.getAssignment().getExecutorNode() != null && + clusterState.nodes().nodeExists(task.getAssignment().getExecutorNode()) == false) { + taskBuilder.reassignTask(task.getId(), LOST_NODE_ASSIGNMENT); + } + } + + if (taskBuilder.isChanged() == false) { + return clusterState; + } + + MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.metaData()); + metaDataBuilder.putCustom(TYPE, taskBuilder.build()); + return ClusterState.builder(clusterState).metaData(metaDataBuilder).build(); + } + public static class Assignment { @Nullable private final String executorNode; diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 7fd68852ce284..cb34804981f1b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -121,6 +121,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable s.getName().equals(snapshotName)) .findFirst(); // if nothing found by the same name, then look in the cluster state for current in progress snapshots + long repoGenId = repositoryData.getGenId(); if (matchedEntry.isPresent() == false) { - matchedEntry = currentSnapshots(repositoryName, Collections.emptyList()).stream() - .map(e -> e.snapshot().getSnapshotId()).filter(s -> s.getName().equals(snapshotName)).findFirst(); + Optional matchedInProgress = currentSnapshots(repositoryName, Collections.emptyList()).stream() + .filter(s -> 
s.snapshot().getSnapshotId().getName().equals(snapshotName)).findFirst(); + if (matchedInProgress.isPresent()) { + matchedEntry = matchedInProgress.map(s -> s.snapshot().getSnapshotId()); + // Derive repository generation if a snapshot is in progress because it will increment the generation when it finishes + repoGenId = matchedInProgress.get().getRepositoryStateId() + 1L; + } } if (matchedEntry.isPresent() == false) { throw new SnapshotMissingException(repositoryName, snapshotName); } - deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repositoryData.getGenId(), immediatePriority); + deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener, repoGenId, immediatePriority); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java b/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java index 54f4d1d0c8d84..4116f88b14224 100644 --- a/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java +++ b/server/src/main/java/org/elasticsearch/transport/CompressibleBytesOutputStream.java @@ -39,8 +39,8 @@ * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. * The underlying {@link BytesReference} will be returned. * - * {@link CompressibleBytesOutputStream#close()} should be called when the bytes are no longer needed and - * can be safely released. + * {@link CompressibleBytesOutputStream#close()} will NOT close the underlying stream. The byte stream passed + * in the constructor must be closed individually. */ final class CompressibleBytesOutputStream extends StreamOutput { @@ -92,12 +92,9 @@ public void flush() throws IOException { @Override public void close() throws IOException { - if (stream == bytesStreamOutput) { - assert shouldCompress == false : "If the streams are the same we should not be compressing"; - IOUtils.close(stream); - } else { + if (stream != bytesStreamOutput) { assert shouldCompress : "If the streams are different we should be compressing"; - IOUtils.close(stream, bytesStreamOutput); + IOUtils.close(stream); } } diff --git a/server/src/main/java/org/elasticsearch/transport/InboundMessage.java b/server/src/main/java/org/elasticsearch/transport/InboundMessage.java new file mode 100644 index 0000000000000..44e3b017ed27e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/InboundMessage.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.transport; +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.Compressor; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.compress.NotCompressedException; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.TreeSet; + +public abstract class InboundMessage extends NetworkMessage implements Closeable { + + private final StreamInput streamInput; + + InboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, StreamInput streamInput) { + super(threadContext, version, status, requestId); + this.streamInput = streamInput; + } + + StreamInput getStreamInput() { + return streamInput; + } + + static class Reader { + + private final Version version; + private final NamedWriteableRegistry namedWriteableRegistry; + private final ThreadContext threadContext; + + Reader(Version version, NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { + this.version = version; + this.namedWriteableRegistry = namedWriteableRegistry; + this.threadContext = threadContext; + } + + InboundMessage deserialize(BytesReference reference) throws IOException { + int messageLengthBytes = reference.length(); + final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + // we have additional bytes to read, outside of the header + boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; + StreamInput streamInput = reference.streamInput(); + boolean success = false; + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + long requestId = streamInput.readLong(); + byte status = streamInput.readByte(); + Version remoteVersion = Version.fromId(streamInput.readInt()); + final boolean isHandshake = TransportStatus.isHandshake(status); + ensureVersionCompatibility(remoteVersion, version, isHandshake); + if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamInput.available() > 0) { + Compressor compressor; + try { + final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; + compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); + } catch (NotCompressedException ex) { + int maxToRead = Math.min(reference.length(), 10); + StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [") + .append(maxToRead).append("] content bytes out of [").append(reference.length()) + .append("] readable bytes with message size [").append(messageLengthBytes).append("] are ["); + for (int i = 0; i < maxToRead; i++) { + sb.append(reference.get(i)).append(","); + } + sb.append("]"); + throw new IllegalStateException(sb.toString()); + } + streamInput = compressor.streamInput(streamInput); + } + streamInput = new NamedWriteableAwareStreamInput(streamInput, namedWriteableRegistry); + streamInput.setVersion(remoteVersion); + + threadContext.readHeaders(streamInput); + + InboundMessage message; + if (TransportStatus.isRequest(status)) { + final Set features; + if (remoteVersion.onOrAfter(Version.V_6_3_0)) { + features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(streamInput.readStringArray()))); + } else { + features = Collections.emptySet(); + } + final String action = streamInput.readString(); + message = new RequestMessage(threadContext, remoteVersion, status, requestId, action, features, streamInput); + } else { + message = new ResponseMessage(threadContext, remoteVersion, status, requestId, streamInput); + } + success = true; + return message; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(streamInput); + } + } + } + } + + @Override + public void close() throws IOException { + streamInput.close(); + } + + private static void ensureVersionCompatibility(Version version, Version currentVersion, boolean isHandshake) { + // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version + // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the + // handshake. This looks odd but it's required to establish the connection correctly; we check for real compatibility + // once the connection is established. + final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; + if (version.isCompatible(compatibilityVersion) == false) { + final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); + String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: ["; + throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); + } + } + + public static class RequestMessage extends InboundMessage { + + private final String actionName; + private final Set features; + + RequestMessage(ThreadContext threadContext, Version version, byte status, long requestId, String actionName, Set features, + StreamInput streamInput) { + super(threadContext, version, status, requestId, streamInput); + this.actionName = actionName; + this.features = features; + } + + String getActionName() { + return actionName; + } + + Set getFeatures() { + return features; + } + } + + public static class ResponseMessage extends InboundMessage { + + ResponseMessage(ThreadContext threadContext, Version version, byte status, long requestId, StreamInput streamInput) { + super(threadContext, version, status, requestId, streamInput); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java new file mode 100644 index 0000000000000..7d8dbb8a0f1b6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +/** + * Represents a transport message sent over the network. Subclasses implement serialization and + * deserialization. + */ +public abstract class NetworkMessage { + + protected final Version version; + protected final ThreadContext threadContext; + protected final ThreadContext.StoredContext storedContext; + protected final long requestId; + protected final byte status; + + NetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId) { + this.threadContext = threadContext; + storedContext = threadContext.stashContext(); + storedContext.restore(); + this.version = version; + this.requestId = requestId; + this.status = status; + } + + public Version getVersion() { + return version; + } + + public long getRequestId() { + return requestId; + } + + boolean isCompress() { + return TransportStatus.isCompress(status); + } + + ThreadContext.StoredContext getStoredContext() { + return storedContext; + } + + boolean isResponse() { + return TransportStatus.isRequest(status) == false; + } + + boolean isRequest() { + return TransportStatus.isRequest(status); + } + + boolean isHandshake() { + return TransportStatus.isHandshake(status); + } + + boolean isError() { + return TransportStatus.isError(status); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java new file mode 100644 index 0000000000000..31c67d930c538 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NotifyOnceListener; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.network.CloseableChannel; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; + +final class OutboundHandler { + + private static final Logger logger = LogManager.getLogger(OutboundHandler.class); + + private final MeanMetric transmittedBytesMetric = new MeanMetric(); + private final ThreadPool threadPool; + private final BigArrays bigArrays; + private final TransportLogger transportLogger; + + OutboundHandler(ThreadPool threadPool, BigArrays bigArrays, TransportLogger transportLogger) { + this.threadPool = threadPool; + this.bigArrays = bigArrays; + this.transportLogger = transportLogger; + } + + void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener listener) { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + SendContext sendContext = new SendContext(channel, () -> bytes, listener); + try { + internalSendMessage(channel, sendContext); + } catch (IOException e) { + // This should not happen as the bytes are already serialized + throw new AssertionError(e); + } + } + + void sendMessage(TcpChannel channel, OutboundMessage networkMessage, ActionListener listener) throws IOException { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + MessageSerializer serializer = new MessageSerializer(networkMessage, bigArrays); + SendContext sendContext = new SendContext(channel, serializer, listener, serializer); + internalSendMessage(channel, sendContext); + } + + /** + * sends a message to the given channel, using the given callbacks. 
+ */ + private void internalSendMessage(TcpChannel channel, SendContext sendContext) throws IOException { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + BytesReference reference = sendContext.get(); + try { + channel.sendMessage(reference, sendContext); + } catch (RuntimeException ex) { + sendContext.onFailure(ex); + CloseableChannel.closeChannel(channel); + throw ex; + } + + } + + MeanMetric getTransmittedBytes() { + return transmittedBytesMetric; + } + + private static class MessageSerializer implements CheckedSupplier, Releasable { + + private final OutboundMessage message; + private final BigArrays bigArrays; + private volatile ReleasableBytesStreamOutput bytesStreamOutput; + + private MessageSerializer(OutboundMessage message, BigArrays bigArrays) { + this.message = message; + this.bigArrays = bigArrays; + } + + @Override + public BytesReference get() throws IOException { + bytesStreamOutput = new ReleasableBytesStreamOutput(bigArrays); + return message.serialize(bytesStreamOutput); + } + + @Override + public void close() { + IOUtils.closeWhileHandlingException(bytesStreamOutput); + } + } + + private class SendContext extends NotifyOnceListener implements CheckedSupplier { + + private final TcpChannel channel; + private final CheckedSupplier messageSupplier; + private final ActionListener listener; + private final Releasable optionalReleasable; + private long messageSize = -1; + + private SendContext(TcpChannel channel, CheckedSupplier messageSupplier, + ActionListener listener) { + this(channel, messageSupplier, listener, null); + } + + private SendContext(TcpChannel channel, CheckedSupplier messageSupplier, + ActionListener listener, Releasable optionalReleasable) { + this.channel = channel; + this.messageSupplier = messageSupplier; + this.listener = listener; + this.optionalReleasable = optionalReleasable; + } + + public BytesReference get() throws IOException { + BytesReference message; + try { + message = messageSupplier.get(); + messageSize = message.length(); + transportLogger.logOutboundMessage(channel, message); + return message; + } catch (Exception e) { + onFailure(e); + throw e; + } + } + + @Override + protected void innerOnResponse(Void v) { + assert messageSize != -1 : "If onResponse is being called, the message should have been serialized"; + transmittedBytesMetric.inc(messageSize); + closeAndCallback(() -> listener.onResponse(v)); + } + + @Override + protected void innerOnFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); + closeAndCallback(() -> listener.onFailure(e)); + } + + private void closeAndCallback(Runnable runnable) { + Releasables.close(optionalReleasable, runnable::run); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java new file mode 100644 index 0000000000000..cc295a68df3c7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.io.IOException; +import java.util.Set; + +abstract class OutboundMessage extends NetworkMessage implements Writeable { + + private final Writeable message; + + OutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, Writeable message) { + super(threadContext, version, status, requestId); + this.message = message; + } + + BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { + storedContext.restore(); + bytesStream.setVersion(version); + bytesStream.skip(TcpHeader.HEADER_SIZE); + + // The compressible bytes stream will not close the underlying bytes stream + BytesReference reference; + try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, TransportStatus.isCompress(status))) { + stream.setVersion(version); + threadContext.writeTo(stream); + writeTo(stream); + reference = writeMessage(stream); + } + bytesStream.seek(0); + TcpHeader.writeHeader(bytesStream, requestId, status, version, reference.length() - TcpHeader.HEADER_SIZE); + return reference; + } + + private BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { + final BytesReference zeroCopyBuffer; + if (message instanceof BytesTransportRequest) { + BytesTransportRequest bRequest = (BytesTransportRequest) message; + bRequest.writeThin(stream); + zeroCopyBuffer = bRequest.bytes; + } else if (message instanceof RemoteTransportException) { + stream.writeException((RemoteTransportException) message); + zeroCopyBuffer = BytesArray.EMPTY; + } else { + message.writeTo(stream); + zeroCopyBuffer = BytesArray.EMPTY; + } + // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream + // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) + // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the + // #validateRequest method. this might be a problem in deflate after all but it's important to write + // the marker bytes. 
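// [Editor's sketch, not part of the patch] A JDK-only illustration of the EOS-marker
// comment above: a deflate stream must be finished so its end-of-stream marker is
// written, otherwise the inflating side fails with an unexpected EOF.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

class EosMarkerSketch {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DeflaterOutputStream deflate = new DeflaterOutputStream(bytes);
        deflate.write("payload".getBytes(StandardCharsets.UTF_8));
        deflate.finish(); // writes the EOS marker; skipping this breaks the reader
        byte[] inflated = new InflaterInputStream(
            new ByteArrayInputStream(bytes.toByteArray())).readAllBytes();
        System.out.println(new String(inflated, StandardCharsets.UTF_8)); // payload
    }
}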
+ final BytesReference message = stream.materializeBytes(); + if (zeroCopyBuffer.length() == 0) { + return message; + } else { + return new CompositeBytesReference(message, zeroCopyBuffer); + } + } + + static class Request extends OutboundMessage { + + private final String[] features; + private final String action; + + Request(ThreadContext threadContext, String[] features, Writeable message, Version version, String action, long requestId, + boolean isHandshake, boolean compress) { + super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + this.features = features; + this.action = action; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (version.onOrAfter(Version.V_6_3_0)) { + out.writeStringArray(features); + } + out.writeString(action); + } + + private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { + byte status = 0; + status = TransportStatus.setRequest(status); + if (compress && OutboundMessage.canCompress(message)) { + status = TransportStatus.setCompress(status); + } + if (isHandshake) { + status = TransportStatus.setHandshake(status); + } + + return status; + } + } + + static class Response extends OutboundMessage { + + private final Set<String> features; + + Response(ThreadContext threadContext, Set<String> features, Writeable message, Version version, long requestId, + boolean isHandshake, boolean compress) { + super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + this.features = features; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.setFeatures(features); + } + + private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { + byte status = 0; + status = TransportStatus.setResponse(status); + if (message instanceof RemoteTransportException) { + status = TransportStatus.setError(status); + } + if (compress) { + status = TransportStatus.setCompress(status); + } + if (isHandshake) { + status = TransportStatus.setHandshake(status); + } + + return status; + } + } + + private static boolean canCompress(Writeable message) { + return message instanceof BytesTransportRequest == false; + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index dd6a21acc8fbd..e0b466e2ec631 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -26,24 +26,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.NotifyOnceListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.compress.Compressor; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.compress.NotCompressedException; -import org.elasticsearch.common.io.stream.BytesStreamOutput
-import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.metrics.MeanMetric; @@ -64,17 +56,14 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import java.io.Closeable; import java.io.IOException; import java.io.StreamCorruptedException; -import java.io.UncheckedIOException; import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -136,8 +125,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final Map> serverChannels = newConcurrentMap(); private final Set acceptedChannels = ConcurrentCollections.newConcurrentSet(); - private final NamedWriteableRegistry namedWriteableRegistry; - // this lock is here to make sure we close this transport and disconnect all the client nodes // connections while no connect operations is going on private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); @@ -145,15 +132,16 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final String transportName; private final MeanMetric readBytesMetric = new MeanMetric(); - private final MeanMetric transmittedBytesMetric = new MeanMetric(); private volatile Map> requestHandlers = Collections.emptyMap(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); private final TransportLogger transportLogger; private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; + private final InboundMessage.Reader reader; + private final OutboundHandler outboundHandler; private final String nodeName; - public TcpTransport(String transportName, Settings settings, Version version, ThreadPool threadPool, + public TcpTransport(String transportName, Settings settings, Version version, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { this.settings = settings; @@ -163,17 +151,18 @@ public TcpTransport(String transportName, Settings settings, Version version, T this.bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS); this.pageCacheRecycler = pageCacheRecycler; this.circuitBreakerService = circuitBreakerService; - this.namedWriteableRegistry = namedWriteableRegistry; this.networkService = networkService; this.transportName = transportName; this.transportLogger = new TransportLogger(); + this.outboundHandler = new OutboundHandler(threadPool, bigArrays, transportLogger); this.handshaker = new TransportHandshaker(version, threadPool, (node, channel, requestId, v) -> sendRequestToChannel(node, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version), - TransportRequestOptions.EMPTY, v, false, 
TransportStatus.setHandshake((byte) 0)), + TransportRequestOptions.EMPTY, v, false, true), (v, features, channel, response, requestId) -> sendResponse(v, features, channel, response, requestId, - TransportHandshaker.HANDSHAKE_ACTION_NAME, false, TransportStatus.setHandshake((byte) 0))); - this.keepAlive = new TransportKeepAlive(threadPool, this::internalSendMessage); + TransportHandshaker.HANDSHAKE_ACTION_NAME, false, true)); + this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); + this.reader = new InboundMessage.Reader(version, namedWriteableRegistry, threadPool.getThreadContext()); this.nodeName = Node.NODE_NAME_SETTING.get(settings); final Settings defaultFeatures = TransportSettings.DEFAULT_FEATURES_SETTING.get(settings); @@ -280,7 +269,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, throw new NodeNotConnectedException(node, "connection already closed"); } TcpChannel channel = channel(options.type()); - sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), compress, (byte) 0); + sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), compress); } } @@ -573,7 +562,8 @@ protected final void doStop() { for (Map.Entry> entry : serverChannels.entrySet()) { String profile = entry.getKey(); List channels = entry.getValue(); - ActionListener closeFailLogger = ActionListener.wrap(c -> {}, + ActionListener closeFailLogger = ActionListener.wrap(c -> { + }, e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e)); channels.forEach(c -> c.addCloseListener(closeFailLogger)); CloseableChannel.closeChannels(channels, true); @@ -628,26 +618,7 @@ public void onException(TcpChannel channel, Exception e) { // in case we are able to return data, serialize the exception content and sent it back to the client if (channel.isOpen()) { BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); - ActionListener listener = new ActionListener() { - @Override - public void onResponse(Void aVoid) { - CloseableChannel.closeChannel(channel); - } - - @Override - public void onFailure(Exception e) { - logger.debug("failed to send message to httpOnTransport channel", e); - CloseableChannel.closeChannel(channel); - } - }; - // We do not call internalSendMessage because we are not sending a message that is an - // elasticsearch binary message. We are just serializing an exception here. Not formatting it - // as an elasticsearch transport message. - try { - channel.sendMessage(message, new SendListener(channel, message.length(), listener)); - } catch (Exception ex) { - listener.onFailure(ex); - } + outboundHandler.sendBytes(channel, message, ActionListener.wrap(() -> CloseableChannel.closeChannel(channel))); } } else { logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); @@ -660,16 +631,6 @@ protected void onServerException(TcpServerChannel channel, Exception e) { logger.error(new ParameterizedMessage("exception from server channel caught on transport layer [channel={}]", channel), e); } - /** - * Exception handler for exceptions that are not associated with a specific channel. 
- * - * @param exception the exception - */ - protected void onNonChannelException(Exception exception) { - logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), - exception); - } - protected void serverAcceptedChannel(TcpChannel channel) { boolean addedOnThisCall = acceptedChannels.add(channel); assert addedOnThisCall : "Channel should only be added to accepted channel set once"; @@ -701,65 +662,21 @@ protected void serverAcceptedChannel(TcpChannel channel) { */ protected abstract void stopInternal(); - private boolean canCompress(TransportRequest request) { - return request instanceof BytesTransportRequest == false; - } - private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options, Version channelVersion, - boolean compressRequest, byte status) throws IOException, TransportException { - - // only compress if asked and the request is not bytes. Otherwise only - // the header part is compressed, and the "body" can't be extracted as compressed - final boolean compressMessage = compressRequest && canCompress(request); - - status = TransportStatus.setRequest(status); - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - final CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, compressMessage); - boolean addedReleaseListener = false; - try { - if (compressMessage) { - status = TransportStatus.setCompress(status); - } - - // we pick the smallest of the 2, to support both backward and forward compatibility - // note, this is the only place we need to do this, since from here on, we use the serialized version - // as the version to use also when the node receiving this request will send the response with - Version version = Version.min(this.version, channelVersion); - - stream.setVersion(version); - threadPool.getThreadContext().writeTo(stream); - if (version.onOrAfter(Version.V_6_3_0)) { - stream.writeStringArray(features); - } - stream.writeString(action); - BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream); - final TransportRequestOptions finalOptions = options; - // this might be called in a different thread - ReleaseListener releaseListener = new ReleaseListener(stream, - () -> messageListener.onRequestSent(node, requestId, action, request, finalOptions)); - internalSendMessage(channel, message, releaseListener); - addedReleaseListener = true; - } finally { - if (!addedReleaseListener) { - IOUtils.close(stream); - } - } + boolean compressRequest) throws IOException, TransportException { + sendRequestToChannel(node, channel, requestId, action, request, options, channelVersion, compressRequest, false); } - /** - * sends a message to the given channel, using the given callbacks. 
- */ - private void internalSendMessage(TcpChannel channel, BytesReference message, ActionListener listener) { - channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); - transportLogger.logOutboundMessage(channel, message); - try { - channel.sendMessage(message, new SendListener(channel, message.length(), listener)); - } catch (Exception ex) { - // call listener to ensure that any resources are released - listener.onFailure(ex); - onException(channel, ex); - } + private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, + final TransportRequest request, TransportRequestOptions options, Version channelVersion, + boolean compressRequest, boolean isHandshake) throws IOException, TransportException { + Version version = Version.min(this.version, channelVersion); + OutboundMessage.Request message = new OutboundMessage.Request(threadPool.getThreadContext(), features, request, version, action, + requestId, isHandshake, compressRequest); + ActionListener listener = ActionListener.wrap(() -> + messageListener.onRequestSent(node, requestId, action, request, options)); + outboundHandler.sendMessage(channel, message, listener); } /** @@ -779,23 +696,13 @@ public void sendErrorResponse( final Exception error, final long requestId, final String action) throws IOException { - try (BytesStreamOutput stream = new BytesStreamOutput()) { - stream.setVersion(nodeVersion); - stream.setFeatures(features); - RemoteTransportException tx = new RemoteTransportException( - nodeName, new TransportAddress(channel.getLocalAddress()), action, error); - threadPool.getThreadContext().writeTo(stream); - stream.writeException(tx); - byte status = 0; - status = TransportStatus.setResponse(status); - status = TransportStatus.setError(status); - final BytesReference bytes = stream.bytes(); - final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); - CompositeBytesReference message = new CompositeBytesReference(header, bytes); - ReleaseListener releaseListener = new ReleaseListener(null, - () -> messageListener.onResponseSent(requestId, action, error)); - internalSendMessage(channel, message, releaseListener); - } + Version version = Version.min(this.version, nodeVersion); + TransportAddress address = new TransportAddress(channel.getLocalAddress()); + RemoteTransportException tx = new RemoteTransportException(nodeName, address, action, error); + OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), features, tx, version, requestId, + false, false); + ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, error)); + outboundHandler.sendMessage(channel, message, listener); } /** @@ -811,7 +718,7 @@ public void sendResponse( final long requestId, final String action, final boolean compress) throws IOException { - sendResponse(nodeVersion, features, channel, response, requestId, action, compress, (byte) 0); + sendResponse(nodeVersion, features, channel, response, requestId, action, compress, false); } private void sendResponse( @@ -822,82 +729,18 @@ private void sendResponse( final long requestId, final String action, boolean compress, - byte status) throws IOException { - - status = TransportStatus.setResponse(status); - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, compress); - boolean 
addedReleaseListener = false; - try { - if (compress) { - status = TransportStatus.setCompress(status); - } - threadPool.getThreadContext().writeTo(stream); - stream.setVersion(nodeVersion); - stream.setFeatures(features); - BytesReference message = buildMessage(requestId, status, nodeVersion, response, stream); - - // this might be called in a different thread - ReleaseListener releaseListener = new ReleaseListener(stream, - () -> messageListener.onResponseSent(requestId, action, response)); - internalSendMessage(channel, message, releaseListener); - addedReleaseListener = true; - } finally { - if (!addedReleaseListener) { - IOUtils.close(stream); - } - } - } - - /** - * Writes the Tcp message header into a bytes reference. - * - * @param requestId the request ID - * @param status the request status - * @param protocolVersion the protocol version used to serialize the data in the message - * @param length the payload length in bytes - * @see TcpHeader - */ - private BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { - try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { - headerOutput.setVersion(protocolVersion); - TcpHeader.writeHeader(headerOutput, requestId, status, protocolVersion, length); - final BytesReference bytes = headerOutput.bytes(); - assert bytes.length() == TcpHeader.HEADER_SIZE : "header size mismatch expected: " + TcpHeader.HEADER_SIZE + " but was: " - + bytes.length(); - return bytes; - } - } - - /** - * Serializes the given message into a bytes representation - */ - private BytesReference buildMessage(long requestId, byte status, Version nodeVersion, TransportMessage message, - CompressibleBytesOutputStream stream) throws IOException { - final BytesReference zeroCopyBuffer; - if (message instanceof BytesTransportRequest) { // what a shitty optimization - we should use a direct send method instead - BytesTransportRequest bRequest = (BytesTransportRequest) message; - assert nodeVersion.equals(bRequest.version()); - bRequest.writeThin(stream); - zeroCopyBuffer = bRequest.bytes; - } else { - message.writeTo(stream); - zeroCopyBuffer = BytesArray.EMPTY; - } - // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream - // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) - // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the - // #validateRequest method. this might be a problem in deflate after all but it's important to write - // the marker bytes. - final BytesReference messageBody = stream.materializeBytes(); - final BytesReference header = buildHeader(requestId, status, stream.getVersion(), messageBody.length() + zeroCopyBuffer.length()); - return new CompositeBytesReference(header, messageBody, zeroCopyBuffer); + boolean isHandshake) throws IOException { + Version version = Version.min(this.version, nodeVersion); + OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), features, response, version, + requestId, isHandshake, compress); + ActionListener listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, response)); + outboundHandler.sendMessage(channel, message, listener); } /** * Handles inbound message that has been decoded. 
* - * @param channel the channel the message if fomr + * @param channel the channel the message is from * @param message the message */ public void inboundMessage(TcpChannel channel, BytesReference message) { @@ -1065,53 +908,26 @@ public HttpOnTransportException(StreamInput in) throws IOException { * This method handles the message receive part for both request and responses */ public final void messageReceived(BytesReference reference, TcpChannel channel) throws IOException { - String profileName = channel.getProfile(); + readBytesMetric.inc(reference.length() + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); InetSocketAddress remoteAddress = channel.getRemoteAddress(); - int messageLengthBytes = reference.length(); - final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - readBytesMetric.inc(totalMessageSize); - // we have additional bytes to read, outside of the header - boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; - StreamInput streamIn = reference.streamInput(); - boolean success = false; - try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) { - long requestId = streamIn.readLong(); - byte status = streamIn.readByte(); - Version version = Version.fromId(streamIn.readInt()); - if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) { - Compressor compressor; - try { - final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; - compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); - } catch (NotCompressedException ex) { - int maxToRead = Math.min(reference.length(), 10); - StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) - .append("] content bytes out of [").append(reference.length()) - .append("] readable bytes with message size [").append(messageLengthBytes).append("] ").append("] are ["); - for (int i = 0; i < maxToRead; i++) { - sb.append(reference.get(i)).append(","); - } - sb.append("]"); - throw new IllegalStateException(sb.toString()); - } - streamIn = compressor.streamInput(streamIn); - } - final boolean isHandshake = TransportStatus.isHandshake(status); - ensureVersionCompatibility(version, this.version, isHandshake); - streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry); - streamIn.setVersion(version); - threadPool.getThreadContext().readHeaders(streamIn); - threadPool.getThreadContext().putTransient("_remote_address", remoteAddress); - if (TransportStatus.isRequest(status)) { - handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status); + + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext(); + InboundMessage message = reader.deserialize(reference)) { + // Place the context with the headers from the message + message.getStoredContext().restore(); + threadContext.putTransient("_remote_address", remoteAddress); + if (message.isRequest()) { + handleRequest(channel, (InboundMessage.RequestMessage) message, reference.length()); } else { final TransportResponseHandler handler; - if (isHandshake) { + long requestId = message.getRequestId(); + if (message.isHandshake()) { handler = handshaker.removeHandlerForHandshake(requestId); } else { TransportResponseHandler theHandler = 
responseHandlers.onResponseReceived(requestId, messageListener); - if (theHandler == null && TransportStatus.isError(status)) { + if (theHandler == null && message.isError()) { handler = handshaker.removeHandlerForHandshake(requestId); } else { handler = theHandler; @@ -1119,40 +935,20 @@ public final void messageReceived(BytesReference reference, TcpChannel channel) } // ignore if its null, the service logs it if (handler != null) { - if (TransportStatus.isError(status)) { - handlerResponseError(streamIn, handler); + if (message.isError()) { + handlerResponseError(message.getStreamInput(), handler); } else { - handleResponse(remoteAddress, streamIn, handler); + handleResponse(remoteAddress, message.getStreamInput(), handler); } // Check the entire message has been read - final int nextByte = streamIn.read(); + final int nextByte = message.getStreamInput().read(); // calling read() is useful to make sure the message is fully read, even if there is an EOS marker if (nextByte != -1) { throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" - + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + + handler + "], error [" + message.isError() + "]; resetting"); } } } - success = true; - } finally { - if (success) { - IOUtils.close(streamIn); - } else { - IOUtils.closeWhileHandlingException(streamIn); - } - } - } - - static void ensureVersionCompatibility(Version version, Version currentVersion, boolean isHandshake) { - // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version - // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the - // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility - // once the connection is established - final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; - if (version.isCompatible(compatibilityVersion) == false) { - final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); - String msg = "Received " + (isHandshake ? 
"handshake " : "") + "message from unsupported version: ["; - throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); } } @@ -1208,20 +1004,17 @@ private void handleException(final TransportResponseHandler handler, Throwable e }); } - protected String handleRequest(TcpChannel channel, String profileName, final StreamInput stream, long requestId, - int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) - throws IOException { - final Set features; - if (version.onOrAfter(Version.V_6_3_0)) { - features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(stream.readStringArray()))); - } else { - features = Collections.emptySet(); - } - final String action = stream.readString(); + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage message, int messageLengthBytes) throws IOException { + final Set features = message.getFeatures(); + final String profileName = channel.getProfile(); + final String action = message.getActionName(); + final long requestId = message.getRequestId(); + final StreamInput stream = message.getStreamInput(); + final Version version = message.getVersion(); messageListener.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { - if (TransportStatus.isHandshake(status)) { + if (message.isHandshake()) { handshaker.handleHandshake(version, features, channel, requestId, stream); } else { final RequestHandlerRegistry reg = getRequestHandler(action); @@ -1234,9 +1027,9 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); } transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, features, profileName, - messageLengthBytes, TransportStatus.isCompress(status)); + messageLengthBytes, message.isCompress()); final TransportRequest request = reg.newRequest(stream); - request.remoteAddress(new TransportAddress(remoteAddress)); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); // in case we throw an exception, i.e. when the limit is hit, we don't want to verify validateRequest(stream, requestId, action); threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); @@ -1245,7 +1038,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str // the circuit breaker tripped if (transportChannel == null) { transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, features, - profileName, 0, TransportStatus.isCompress(status)); + profileName, 0, message.isCompress()); } try { transportChannel.sendResponse(e); @@ -1254,7 +1047,6 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); } } - return action; } // This template method is needed to inject custom error checking logic in tests. @@ -1331,70 +1123,11 @@ protected final void ensureOpen() { } } - /** - * This listener increments the transmitted bytes metric on success. 
- */ - private class SendListener extends NotifyOnceListener { - - private final TcpChannel channel; - private final long messageSize; - private final ActionListener delegateListener; - - private SendListener(TcpChannel channel, long messageSize, ActionListener delegateListener) { - this.channel = channel; - this.messageSize = messageSize; - this.delegateListener = delegateListener; - } - - @Override - protected void innerOnResponse(Void v) { - transmittedBytesMetric.inc(messageSize); - delegateListener.onResponse(v); - } - - @Override - protected void innerOnFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); - delegateListener.onFailure(e); - } - } - - private class ReleaseListener implements ActionListener { - - private final Closeable optionalCloseable; - private final Runnable transportAdaptorCallback; - - private ReleaseListener(Closeable optionalCloseable, Runnable transportAdaptorCallback) { - this.optionalCloseable = optionalCloseable; - this.transportAdaptorCallback = transportAdaptorCallback; - } - - @Override - public void onResponse(Void aVoid) { - closeAndCallback(null); - } - - @Override - public void onFailure(Exception e) { - closeAndCallback(e); - } - - private void closeAndCallback(final Exception e) { - try { - IOUtils.close(optionalCloseable, transportAdaptorCallback::run); - } catch (final IOException inner) { - if (e != null) { - inner.addSuppressed(e); - } - throw new UncheckedIOException(inner); - } - } - } - @Override public final TransportStats getStats() { - return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(), - transmittedBytesMetric.sum()); + MeanMetric transmittedBytes = outboundHandler.getTransmittedBytes(); + return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytes.count(), + transmittedBytes.sum()); } /** @@ -1569,7 +1302,7 @@ public void onFailure(Exception ex) { public void onTimeout() { if (countDown.fastForward()) { CloseableChannel.closeChannels(channels, false); - listener.onFailure(new ConnectTransportException(node, "connect_timeout[" + connectionProfile.getConnectTimeout() + "]")); + listener.onFailure(new ConnectTransportException(node, "connect_timeout[" + connectionProfile.getConnectTimeout() + "]")); } } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java index ea01cc4ddbfa6..0198609f4c3b6 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java @@ -51,6 +51,10 @@ void logInboundMessage(TcpChannel channel, BytesReference message) { void logOutboundMessage(TcpChannel channel, BytesReference message) { if (logger.isTraceEnabled()) { try { + if (message.get(0) != 'E') { + // This is not an Elasticsearch transport message. 
+ return; + } BytesReference withoutHeader = message.slice(HEADER_SIZE, message.length() - HEADER_SIZE); String logMessage = format(channel, withoutHeader, "WRITE"); logger.trace(logMessage); diff --git a/server/src/main/java/org/elasticsearch/transport/Transports.java b/server/src/main/java/org/elasticsearch/transport/Transports.java index c531d33224a60..d6968a855365a 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transports.java +++ b/server/src/main/java/org/elasticsearch/transport/Transports.java @@ -38,7 +38,6 @@ public static boolean isTransportThread(Thread t) { final String threadName = t.getName(); for (String s : Arrays.asList( HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, - HttpServerTransport.HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX, TEST_MOCK_TRANSPORT_THREAD_PREFIX)) { if (threadName.contains(s)) { diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 489a98dcbaac6..b4ce907d6f7b3 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.client.AbstractClientHeadersTestCase; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; import org.elasticsearch.cluster.coordination.CoordinationStateRejectedException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IllegalShardRoutingStateException; @@ -809,8 +808,7 @@ public void testIds() { ids.put(148, UnknownNamedObjectException.class); ids.put(149, MultiBucketConsumerService.TooManyBucketsException.class); ids.put(150, CoordinationStateRejectedException.class); - ids.put(151, ClusterAlreadyBootstrappedException.class); - ids.put(152, SnapshotInProgressException.class); + ids.put(151, SnapshotInProgressException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java deleted file mode 100644 index ee9c58413b350..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
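The guard added to TransportLogger above relies on the fact that Elasticsearch transport frames begin with the marker bytes 'E','S' (see TcpHeader), so a buffer that does not start with 'E' — for example, the raw error text sent back to a misdirected client in onException — is skipped rather than parsed as a transport frame. A hypothetical helper stating the same check (not part of the Elasticsearch API):

-------------------------------------
final class TransportFrameCheck {
    // Transport frames start with the two marker bytes 'E','S'; anything else
    // is not an Elasticsearch transport message and should not be logged as one.
    static boolean isTransportFrame(byte[] buffer) {
        return buffer.length >= 2 && buffer[0] == 'E' && buffer[1] == 'S';
    }
}
-------------------------------------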
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; - -public class BootstrapClusterRequestTests extends ESTestCase { - - public void testSerialization() throws IOException { - final BootstrapConfiguration bootstrapConfiguration - = new BootstrapConfiguration(Collections.singletonList(new NodeDescription(null, randomAlphaOfLength(10)))); - final BootstrapClusterRequest original = new BootstrapClusterRequest(bootstrapConfiguration); - assertNull(original.validate()); - final BootstrapClusterRequest deserialized = copyWriteable(original, writableRegistry(), BootstrapClusterRequest::new); - assertThat(deserialized.getBootstrapConfiguration(), equalTo(bootstrapConfiguration)); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java deleted file mode 100644 index fb33dbc5fcbd6..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.Matchers.equalTo; - -public class BootstrapClusterResponseTests extends ESTestCase { - public void testSerialization() throws IOException { - final BootstrapClusterResponse original = new BootstrapClusterResponse(randomBoolean()); - final BootstrapClusterResponse deserialized = copyWriteable(original, writableRegistry(), BootstrapClusterResponse::new); - assertThat(deserialized.getAlreadyBootstrapped(), equalTo(original.getAlreadyBootstrapped())); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java deleted file mode 100644 index fc2e24017e76a..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.EqualsHashCodeTestUtils; -import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.startsWith; - -public class BootstrapConfigurationTests extends ESTestCase { - - public void testEqualsHashcodeSerialization() { - // Note: the explicit cast of the CopyFunction is needed for some IDE (specifically Eclipse 4.8.0) to infer the right type - EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomBootstrapConfiguration(), - (CopyFunction) bootstrapConfiguration -> copyWriteable(bootstrapConfiguration, writableRegistry(), - BootstrapConfiguration::new), - this::mutate); - } - - public void testNodeDescriptionResolvedByName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - assertThat(new NodeDescription(null, expectedNode.getName()).resolve(discoveryNodes), equalTo(expectedNode)); - } - - public void testNodeDescriptionResolvedByIdAndName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - assertThat(new NodeDescription(expectedNode).resolve(discoveryNodes), equalTo(expectedNode)); - } - - public void testRejectsMismatchedId() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(randomAlphaOfLength(11), expectedNode.getName()).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("node id mismatch comparing ")); - } - - public void testRejectsMismatchedName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(expectedNode.getId(), randomAlphaOfLength(11)).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("node name mismatch comparing ")); - } - - public void testFailsIfNoMatch() { - final List discoveryNodes = randomDiscoveryNodes(); - final ElasticsearchException e = 
expectThrows(ElasticsearchException.class, - () -> randomNodeDescription().resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("no node matching ")); - } - - public void testFailsIfDuplicateMatchOnName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode discoveryNode = randomFrom(discoveryNodes); - discoveryNodes.add(new DiscoveryNode(discoveryNode.getName(), randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - singleton(Role.MASTER), Version.CURRENT)); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(null, discoveryNode.getName()).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("discovered multiple nodes matching ")); - } - - public void testFailsIfDuplicatedNode() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode discoveryNode = randomFrom(discoveryNodes); - discoveryNodes.add(discoveryNode); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(discoveryNode).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("discovered multiple nodes matching ")); - } - - public void testResolvesEntireConfiguration() { - final List discoveryNodes = randomDiscoveryNodes(); - final List selectedNodes = randomSubsetOf(randomIntBetween(1, discoveryNodes.size()), discoveryNodes); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration(selectedNodes.stream() - .map(discoveryNode -> randomBoolean() ? new NodeDescription(discoveryNode) : new NodeDescription(null, discoveryNode.getName())) - .collect(Collectors.toList())); - - final VotingConfiguration expectedConfiguration - = new VotingConfiguration(selectedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())); - final VotingConfiguration votingConfiguration = bootstrapConfiguration.resolve(discoveryNodes); - assertThat(votingConfiguration, equalTo(expectedConfiguration)); - } - - public void testRejectsDuplicatedDescriptions() { - final List discoveryNodes = randomDiscoveryNodes(); - final List selectedNodes = randomSubsetOf(randomIntBetween(1, discoveryNodes.size()), discoveryNodes); - final List selectedNodeDescriptions = selectedNodes.stream() - .map(discoveryNode -> randomBoolean() ? new NodeDescription(discoveryNode) : new NodeDescription(null, discoveryNode.getName())) - .collect(Collectors.toList()); - final NodeDescription toDuplicate = randomFrom(selectedNodeDescriptions); - selectedNodeDescriptions.add(randomBoolean() ? 
toDuplicate : new NodeDescription(null, toDuplicate.getName())); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration(selectedNodeDescriptions); - - final ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> bootstrapConfiguration.resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("multiple nodes matching ")); - } - - private NodeDescription mutate(NodeDescription original) { - if (randomBoolean()) { - return new NodeDescription(original.getId(), randomAlphaOfLength(21 - original.getName().length())); - } else { - if (original.getId() == null) { - return new NodeDescription(randomAlphaOfLength(10), original.getName()); - } else if (randomBoolean()) { - return new NodeDescription(randomAlphaOfLength(21 - original.getId().length()), original.getName()); - } else { - return new NodeDescription(null, original.getName()); - } - } - } - - protected BootstrapConfiguration mutate(BootstrapConfiguration original) { - final List newDescriptions = new ArrayList<>(original.getNodeDescriptions()); - final int mutateElement = randomIntBetween(0, newDescriptions.size()); - if (mutateElement == newDescriptions.size()) { - newDescriptions.add(randomIntBetween(0, newDescriptions.size()), randomNodeDescription()); - } else { - if (newDescriptions.size() > 1 && randomBoolean()) { - newDescriptions.remove(mutateElement); - } else { - newDescriptions.set(mutateElement, mutate(newDescriptions.get(mutateElement))); - } - } - return new BootstrapConfiguration(newDescriptions); - } - - protected NodeDescription randomNodeDescription() { - return new NodeDescription(randomBoolean() ? null : randomAlphaOfLength(10), randomAlphaOfLength(10)); - } - - protected BootstrapConfiguration randomBootstrapConfiguration() { - final int size = randomIntBetween(1, 5); - final List nodeDescriptions = new ArrayList<>(size); - while (nodeDescriptions.size() <= size) { - nodeDescriptions.add(randomNodeDescription()); - } - return new BootstrapConfiguration(nodeDescriptions); - } - - protected List randomDiscoveryNodes() { - final int size = randomIntBetween(1, 5); - final List nodes = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - nodes.add(new DiscoveryNode(randomAlphaOfLength(10), randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - singleton(Role.MASTER), Version.CURRENT)); - } - return nodes; - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java deleted file mode 100644 index 676e2e958cac7..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; -import static org.hamcrest.core.Is.is; - -public class GetDiscoveredNodesRequestTests extends ESTestCase { - - public void testTimeoutValidation() { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - assertThat("default value is 30s", getDiscoveredNodesRequest.getTimeout(), is(TimeValue.timeValueSeconds(30))); - - final TimeValue newTimeout = TimeValue.parseTimeValue(randomTimeValue(), "timeout"); - getDiscoveredNodesRequest.setTimeout(newTimeout); - assertThat("value updated", getDiscoveredNodesRequest.getTimeout(), equalTo(newTimeout)); - - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> getDiscoveredNodesRequest.setTimeout(TimeValue.timeValueNanos(randomLongBetween(-10, -1)))); - assertThat(exception.getMessage(), startsWith("negative timeout of ")); - assertThat(exception.getMessage(), endsWith(" is not allowed")); - - getDiscoveredNodesRequest.setTimeout(null); - assertThat("value updated", getDiscoveredNodesRequest.getTimeout(), nullValue()); - } - - public void testSerialization() throws IOException { - final GetDiscoveredNodesRequest originalRequest = new GetDiscoveredNodesRequest(); - - if (randomBoolean()) { - originalRequest.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), "timeout")); - } else if (randomBoolean()) { - originalRequest.setTimeout(null); - } - - final GetDiscoveredNodesRequest deserialized = copyWriteable(originalRequest, writableRegistry(), GetDiscoveredNodesRequest::new); - - assertThat(deserialized.getTimeout(), equalTo(originalRequest.getTimeout())); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java deleted file mode 100644 index 7d2fc602e66c6..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.equalTo; - -public class GetDiscoveredNodesResponseTests extends ESTestCase { - public void testSerialization() throws IOException { - final GetDiscoveredNodesResponse original = new GetDiscoveredNodesResponse(randomDiscoveryNodeSet()); - final GetDiscoveredNodesResponse deserialized = copyWriteable(original, writableRegistry(), GetDiscoveredNodesResponse::new); - assertThat(deserialized.getNodes(), equalTo(original.getNodes())); - } - - private Set randomDiscoveryNodeSet() { - final int size = randomIntBetween(1, 10); - final Set nodes = new HashSet<>(size); - while (nodes.size() < size) { - assertTrue(nodes.add(new DiscoveryNode(randomAlphaOfLength(10), randomAlphaOfLength(10), - UUIDs.randomBase64UUID(random()), randomAlphaOfLength(10), randomAlphaOfLength(10), buildNewFakeTransportAddress(), - emptyMap(), singleton(Role.MASTER), Version.CURRENT))); - } - return nodes; - } - - public void testConversionToBootstrapConfiguration() { - final Set nodes = randomDiscoveryNodeSet(); - assertThat(new GetDiscoveredNodesResponse(nodes).getBootstrapConfiguration().resolve(nodes).getNodeIds(), - equalTo(nodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()))); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java deleted file mode 100644 index 31486a52bd08f..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java
deleted file mode 100644
index 31486a52bd08f..0000000000000
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.action.admin.cluster.bootstrap;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription;
-import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ESAllocationTestCase;
-import org.elasticsearch.cluster.coordination.Coordinator;
-import org.elasticsearch.cluster.coordination.InMemoryPersistedState;
-import org.elasticsearch.cluster.coordination.NoOpClusterApplier;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.service.MasterService;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.transport.MockTransport;
-import org.elasticsearch.threadpool.TestThreadPool;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.threadpool.ThreadPool.Names;
-import org.elasticsearch.transport.TransportException;
-import org.elasticsearch.transport.TransportResponseHandler;
-import org.elasticsearch.transport.TransportService;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static java.util.Collections.emptyList;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.emptySet;
-import static java.util.Collections.singletonList;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verifyZeroInteractions;
-
-public class TransportBootstrapClusterActionTests extends ESTestCase {
-
-    private static final ActionFilters EMPTY_FILTERS = new ActionFilters(emptySet());
-
-    private DiscoveryNode discoveryNode;
-    private static ThreadPool threadPool;
-    private TransportService transportService;
-    private Coordinator coordinator;
-
-    private static BootstrapClusterRequest exampleRequest() {
-        return new BootstrapClusterRequest(new BootstrapConfiguration(singletonList(new NodeDescription("id", "name"))));
-    }
-
-    @BeforeClass
-    public static void createThreadPool() {
-        threadPool = new TestThreadPool("test", Settings.EMPTY);
-    }
-
-    @AfterClass
-    public static void shutdownThreadPool() {
-        threadPool.shutdown();
-    }
-
-    @Before
-    public void setupTest() {
-        discoveryNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
-        final MockTransport transport = new MockTransport();
-        transportService = transport.createTransportService(Settings.EMPTY, threadPool,
-            TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> discoveryNode, null, emptySet());
-
-        final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-        coordinator = new Coordinator("local", Settings.EMPTY, clusterSettings, transportService, writableRegistry(),
-            ESAllocationTestCase.createAllocationService(Settings.EMPTY),
-            new MasterService("local", Settings.EMPTY, threadPool),
-            () -> new InMemoryPersistedState(0, ClusterState.builder(new ClusterName("cluster")).build()), r -> emptyList(),
-            new NoOpClusterApplier(), Collections.emptyList(), new Random(random().nextLong()));
-    }
-
-    public void testHandlesNonstandardDiscoveryImplementation() throws InterruptedException {
-        final Discovery discovery = mock(Discovery.class);
-        verifyZeroInteractions(discovery);
-
-        final String nonstandardDiscoveryType = randomFrom(DiscoveryModule.ZEN_DISCOVERY_TYPE, "single-node", "unknown");
-        new TransportBootstrapClusterAction(
-            Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), nonstandardDiscoveryType).build(),
-            EMPTY_FILTERS, transportService, discovery); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, exampleRequest(), new ResponseHandler() {
-            @Override
-            public void handleResponse(BootstrapClusterResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                final Throwable rootCause = exp.getRootCause();
-                assertThat(rootCause, instanceOf(IllegalArgumentException.class));
-                assertThat(rootCause.getMessage(), equalTo("cluster bootstrapping is not supported by discovery type [" +
-                    nonstandardDiscoveryType + "]"));
-                countDownLatch.countDown();
-            }
-        });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    public void testFailsOnNonMasterEligibleNodes() throws InterruptedException {
-        discoveryNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-        // transport service only picks up local node when started, so we can change it here ^
-
-        new TransportBootstrapClusterAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, exampleRequest(), new ResponseHandler() {
-            @Override
-            public void handleResponse(BootstrapClusterResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                final Throwable rootCause = exp.getRootCause();
-                assertThat(rootCause, instanceOf(IllegalArgumentException.class));
-                assertThat(rootCause.getMessage(),
-                    equalTo("this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"));
-                countDownLatch.countDown();
-            }
-        });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    public void testSetsInitialConfiguration() throws InterruptedException {
-        new TransportBootstrapClusterAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-        coordinator.startInitialJoin();
-
-        assertFalse(coordinator.isInitialConfigurationSet());
-
-        final BootstrapClusterRequest request
-            = new BootstrapClusterRequest(new BootstrapConfiguration(singletonList(new NodeDescription(discoveryNode))));
-
-        {
-            final int parallelRequests = 10;
-            final CountDownLatch countDownLatch = new CountDownLatch(parallelRequests);
-            final AtomicInteger successes = new AtomicInteger();
-
-            for (int i = 0; i < parallelRequests; i++) {
-                transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, request, new ResponseHandler() {
-                    @Override
-                    public void handleResponse(BootstrapClusterResponse response) {
-                        if (response.getAlreadyBootstrapped() == false) {
-                            successes.incrementAndGet();
-                        }
-                        countDownLatch.countDown();
-                    }
-
-                    @Override
-                    public void handleException(TransportException exp) {
-                        throw new AssertionError("should not be called", exp);
-                    }
-                });
-            }
-
-            assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-            assertThat(successes.get(), equalTo(1));
-        }
-
-        assertTrue(coordinator.isInitialConfigurationSet());
-
-        {
-            final CountDownLatch countDownLatch = new CountDownLatch(1);
-            transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, request, new ResponseHandler() {
-                @Override
-                public void handleResponse(BootstrapClusterResponse response) {
-                    assertTrue(response.getAlreadyBootstrapped());
-                    countDownLatch.countDown();
-                }
-
-                @Override
-                public void handleException(TransportException exp) {
-                    throw new AssertionError("should not be called", exp);
-                }
-            });
-
-            assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-        }
-    }
-
-    private abstract class ResponseHandler implements TransportResponseHandler<BootstrapClusterResponse> {
-        @Override
-        public String executor() {
-            return Names.SAME;
-        }
-
-        @Override
-        public BootstrapClusterResponse read(StreamInput in) throws IOException {
-            return new BootstrapClusterResponse(in);
-        }
-    }
-}
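The `testSetsInitialConfiguration` test being deleted asserted that ten parallel bootstrap requests yield exactly one fresh bootstrap and nine "already bootstrapped" responses. The essential concurrency pattern is a compare-and-set guard tallied through a latch; a minimal, self-contained sketch of that exactly-once idiom (all names here are hypothetical, not Elasticsearch APIs):

-------------------------------------
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class BootstrapOnceDemo {
    public static void main(String[] args) throws InterruptedException {
        final int parallelRequests = 10;
        final AtomicBoolean bootstrapped = new AtomicBoolean(); // "cluster" side: has bootstrapping happened?
        final AtomicInteger successes = new AtomicInteger();    // "client" side: how many requests won?
        final CountDownLatch latch = new CountDownLatch(parallelRequests);
        final ExecutorService pool = Executors.newFixedThreadPool(parallelRequests);
        for (int i = 0; i < parallelRequests; i++) {
            pool.submit(() -> {
                // compareAndSet makes the operation idempotent: exactly one thread
                // flips the flag, every other thread observes "already bootstrapped"
                if (bootstrapped.compareAndSet(false, true)) {
                    successes.incrementAndGet();
                }
                latch.countDown();
            });
        }
        if (latch.await(10, TimeUnit.SECONDS) == false || successes.get() != 1) {
            throw new AssertionError("expected exactly one successful bootstrap, got " + successes.get());
        }
        pool.shutdown();
    }
}
-------------------------------------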
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java
deleted file mode 100644
index 6d94dcf6eca14..0000000000000
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.action.admin.cluster.bootstrap;
-
-import org.elasticsearch.ElasticsearchTimeoutException;
-import org.elasticsearch.Version;
-import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ESAllocationTestCase;
-import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException;
-import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
-import org.elasticsearch.cluster.coordination.CoordinationMetaData;
-import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration;
-import org.elasticsearch.cluster.coordination.Coordinator;
-import org.elasticsearch.cluster.coordination.InMemoryPersistedState;
-import org.elasticsearch.cluster.coordination.NoOpClusterApplier;
-import org.elasticsearch.cluster.coordination.PeersResponse;
-import org.elasticsearch.cluster.coordination.PublicationTransportHandler;
-import org.elasticsearch.cluster.coordination.PublishWithJoinResponse;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.service.MasterService;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.discovery.PeersRequest;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.transport.MockTransport;
-import org.elasticsearch.threadpool.TestThreadPool;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.threadpool.ThreadPool.Names;
-import org.elasticsearch.transport.BytesTransportRequest;
-import org.elasticsearch.transport.TransportException;
-import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.transport.TransportResponseHandler;
-import org.elasticsearch.transport.TransportService;
-import org.elasticsearch.transport.TransportService.HandshakeResponse;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import static java.util.Collections.emptyList;
-import static java.util.Collections.emptyMap;
-import static java.util.Collections.emptySet;
-import static java.util.Collections.singleton;
-import static java.util.Collections.singletonList;
-import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
-import static org.elasticsearch.discovery.PeerFinder.REQUEST_PEERS_ACTION_NAME;
-import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.startsWith;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verifyZeroInteractions;
-
-public class TransportGetDiscoveredNodesActionTests extends ESTestCase {
-
-    private static final ActionFilters EMPTY_FILTERS = new ActionFilters(emptySet());
-
-    private static ThreadPool threadPool;
-    private DiscoveryNode localNode;
-    private String clusterName;
-    private TransportService transportService;
-    private Coordinator coordinator;
-    private DiscoveryNode otherNode;
-
-    @BeforeClass
-    public static void createThreadPool() {
-        threadPool = new TestThreadPool("test", Settings.EMPTY);
-    }
-
-    @AfterClass
-    public static void shutdownThreadPool() {
-        threadPool.shutdown();
-    }
-
-    @Before
-    public void setupTest() {
-        clusterName = randomAlphaOfLength(10);
-        localNode = new DiscoveryNode(
-            "node1", "local", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);
-        otherNode = new DiscoveryNode(
-            "node2", "other", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);
-
-        final MockTransport transport = new MockTransport() {
-            @Override
-            protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
-                if (action.equals(HANDSHAKE_ACTION_NAME) && node.getAddress().equals(otherNode.getAddress())) {
-                    handleResponse(requestId, new HandshakeResponse(otherNode, new ClusterName(clusterName), Version.CURRENT));
-                }
-            }
-        };
-        transportService = transport.createTransportService(
-            Settings.builder().put(CLUSTER_NAME_SETTING.getKey(), clusterName).build(), threadPool,
-            TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, emptySet());
-
-        final Settings settings = Settings.builder()
-            .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(),
-                ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap
-
-        final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-        coordinator = new Coordinator("local", settings, clusterSettings, transportService, writableRegistry(),
-            ESAllocationTestCase.createAllocationService(settings),
-            new MasterService("local", settings, threadPool),
-            () -> new InMemoryPersistedState(0, ClusterState.builder(new ClusterName(clusterName)).build()), r -> emptyList(),
-            new NoOpClusterApplier(), Collections.emptyList(), new Random(random().nextLong()));
-    }
-
-    public void testHandlesNonstandardDiscoveryImplementation() throws InterruptedException {
-        final Discovery discovery = mock(Discovery.class);
-        verifyZeroInteractions(discovery);
-
-        final String nonstandardDiscoveryType = randomFrom(DiscoveryModule.ZEN_DISCOVERY_TYPE, "single-node", "unknown");
-        new TransportGetDiscoveredNodesAction(
-            Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), nonstandardDiscoveryType).build(),
-            EMPTY_FILTERS, transportService, discovery); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, new GetDiscoveredNodesRequest(), new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                final Throwable rootCause = exp.getRootCause();
-                assertThat(rootCause, instanceOf(IllegalArgumentException.class));
-                assertThat(rootCause.getMessage(), equalTo("discovered nodes are not exposed by discovery type [" +
-                    nonstandardDiscoveryType + "]"));
-                countDownLatch.countDown();
-            }
-        });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    public void testFailsOnMasterIneligibleNodes() throws InterruptedException {
-        localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-        // transport service only picks up local node when started, so we can change it here ^
-
-        new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, new GetDiscoveredNodesRequest(), new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                final Throwable rootCause = exp.getRootCause();
-                assertThat(rootCause, instanceOf(IllegalArgumentException.class));
-                assertThat(rootCause.getMessage(),
-                    equalTo("this node is not master-eligible, but discovered nodes are only exposed by master-eligible nodes"));
-                countDownLatch.countDown();
-            }
-        });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    public void testFailsQuicklyWithZeroTimeoutAndAcceptsNullTimeout() throws InterruptedException {
-        new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-        coordinator.startInitialJoin();
-
-        {
-            final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-            getDiscoveredNodesRequest.setTimeout(null);
-            getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node"));
-            transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-                @Override
-                public void handleResponse(GetDiscoveredNodesResponse response) {
-                    throw new AssertionError("should not be called");
-                }
-
-                @Override
-                public void handleException(TransportException exp) {
-                    throw new AssertionError("should not be called", exp);
-                }
-            });
-        }
-
-        {
-            final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-            getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-            getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node"));
-
-            final CountDownLatch countDownLatch = new CountDownLatch(1);
-            transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-                @Override
-                public void handleResponse(GetDiscoveredNodesResponse response) {
-                    throw new AssertionError("should not be called");
-                }
-
-                @Override
-                public void handleException(TransportException exp) {
-                    final Throwable rootCause = exp.getRootCause();
-                    assertThat(rootCause, instanceOf(ElasticsearchTimeoutException.class));
-                    assertThat(rootCause.getMessage(), startsWith("timed out while waiting for GetDiscoveredNodesRequest{"));
-                    countDownLatch.countDown();
-                }
-            });
-
-            assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-        }
-    }
-
-    public void testFailsIfAlreadyBootstrapped() throws InterruptedException {
-        new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-        coordinator.startInitialJoin();
-        coordinator.setInitialConfiguration(new VotingConfiguration(singleton(localNode.getId())));
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setTimeout(null);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                if (exp.getRootCause() instanceof ClusterAlreadyBootstrappedException) {
-                    countDownLatch.countDown();
-                } else {
-                    throw new AssertionError("should not be called", exp);
-                }
-            }
-        });
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    public void testFailsIfAcceptsClusterStateWithNonemptyConfiguration() throws InterruptedException, IOException {
-        new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-        coordinator.startInitialJoin();
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setTimeout(null);
-        getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node"));
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                if (exp.getRootCause() instanceof ClusterAlreadyBootstrappedException) {
-                    countDownLatch.countDown();
-                } else {
-                    throw new AssertionError("should not be called", exp);
-                }
-            }
-        });
-
-        ClusterState.Builder publishedClusterState = ClusterState.builder(ClusterName.DEFAULT);
-        publishedClusterState.incrementVersion();
-        publishedClusterState.nodes(DiscoveryNodes.builder()
-            .add(localNode).add(otherNode).localNodeId(localNode.getId()).masterNodeId(otherNode.getId()));
-        publishedClusterState.metaData(MetaData.builder().coordinationMetaData(CoordinationMetaData.builder()
-            .term(1)
-            .lastAcceptedConfiguration(new VotingConfiguration(singleton(otherNode.getId())))
-            .lastCommittedConfiguration(new VotingConfiguration(singleton(otherNode.getId())))
-            .build()));
-
-        transportService.sendRequest(localNode, PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME,
-            new BytesTransportRequest(PublicationTransportHandler.serializeFullClusterState(publishedClusterState.build(), Version.CURRENT),
-                Version.CURRENT),
-            new TransportResponseHandler<PublishWithJoinResponse>() {
-                @Override
-                public void handleResponse(PublishWithJoinResponse response) {
-                    // do nothing
-                }
-
-                @Override
-                public void handleException(TransportException exp) {
-                    throw new AssertionError("should not be called", exp);
-                }
-
-                @Override
-                public String executor() {
-                    return Names.SAME;
-                }
-
-                @Override
-                public PublishWithJoinResponse read(StreamInput in) throws IOException {
-                    return new PublishWithJoinResponse(in);
-                }
-            });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    public void testGetsDiscoveredNodesWithZeroTimeout() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        assertWaitConditionMet(getDiscoveredNodesRequest);
-    }
-
-    public void testGetsDiscoveredNodesByAddress() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), otherNode.getAddress().toString()));
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        assertWaitConditionMet(getDiscoveredNodesRequest);
-    }
-
-    public void testGetsDiscoveredNodesByName() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getName(), otherNode.getName()));
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        assertWaitConditionMet(getDiscoveredNodesRequest);
-    }
-
-    public void testGetsDiscoveredNodesByIP() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        String ip = localNode.getAddress().getAddress();
-        getDiscoveredNodesRequest.setRequiredNodes(Collections.singletonList(ip));
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, '[' + ip + "] matches [");
-    }
-
-    public void testGetsDiscoveredNodesDuplicateName() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        String name = localNode.getName();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(name, name));
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, "[" + localNode + "] matches [" + name + ", " + name + ']');
-    }
-
-    public void testGetsDiscoveredNodesWithDuplicateMatchNameAndAddress() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), localNode.getName()));
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, "[" + localNode + "] matches [");
-    }
-
-    public void testGetsDiscoveredNodesTimeoutOnMissing() throws InterruptedException {
-        setupGetDiscoveredNodesAction();
-
-        final CountDownLatch latch = new CountDownLatch(1);
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), "_missing"));
-        getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                assertThat(exp.getRootCause(), instanceOf(ElasticsearchTimeoutException.class));
-                latch.countDown();
-            }
-        });
-
-        latch.await(10L, TimeUnit.SECONDS);
-    }
-
-    public void testThrowsExceptionIfDuplicateDiscoveredLater() throws InterruptedException {
-        new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-        coordinator.startInitialJoin();
-
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        final String ip = localNode.getAddress().getAddress();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(ip, "not-a-node"));
-
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                Throwable t = exp.getRootCause();
-                assertThat(t, instanceOf(IllegalArgumentException.class));
-                assertThat(t.getMessage(), startsWith('[' + ip + "] matches ["));
-                countDownLatch.countDown();
-            }
-        });
-
-        executeRequestPeersAction();
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    private void executeRequestPeersAction() {
-        threadPool.generic().execute(() ->
-            transportService.sendRequest(localNode, REQUEST_PEERS_ACTION_NAME, new PeersRequest(otherNode, emptyList()),
-                new TransportResponseHandler<PeersResponse>() {
-                    @Override
-                    public PeersResponse read(StreamInput in) throws IOException {
-                        return new PeersResponse(in);
-                    }
-
-                    @Override
-                    public void handleResponse(PeersResponse response) {
-                    }
-
-                    @Override
-                    public void handleException(TransportException exp) {
-                    }
-
-                    @Override
-                    public String executor() {
-                        return Names.SAME;
-                    }
-                }));
-    }
-
-    private void setupGetDiscoveredNodesAction() throws InterruptedException {
-        new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        coordinator.start();
-        coordinator.startInitialJoin();
-
-        executeRequestPeersAction();
-
-        final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest();
-        getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getName(), otherNode.getName()));
-        assertWaitConditionMet(getDiscoveredNodesRequest);
-    }
-
-    private void assertWaitConditionMet(GetDiscoveredNodesRequest getDiscoveredNodesRequest) throws InterruptedException {
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                assertThat(response.getNodes(), containsInAnyOrder(localNode, otherNode));
-                countDownLatch.countDown();
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                throw new AssertionError("should not be called", exp);
-            }
-        });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    private void assertWaitConditionFailedOnDuplicate(GetDiscoveredNodesRequest getDiscoveredNodesRequest, String message)
-        throws InterruptedException {
-        final CountDownLatch countDownLatch = new CountDownLatch(1);
-        transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() {
-            @Override
-            public void handleResponse(GetDiscoveredNodesResponse response) {
-                throw new AssertionError("should not be called");
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                Throwable t = exp.getRootCause();
-                assertThat(t, instanceOf(IllegalArgumentException.class));
-                assertThat(t.getMessage(), startsWith(message));
-                countDownLatch.countDown();
-            }
-        });
-
-        assertTrue(countDownLatch.await(10, TimeUnit.SECONDS));
-    }
-
-    private abstract class ResponseHandler implements TransportResponseHandler<GetDiscoveredNodesResponse> {
-        @Override
-        public String executor() {
-            return Names.SAME;
-        }
-
-        @Override
-        public GetDiscoveredNodesResponse read(StreamInput in) throws IOException {
-            return new GetDiscoveredNodesResponse(in);
-        }
-    }
-}
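Both deleted test classes assert repeatedly on `TransportException#getRootCause` before matching the exception type and message. Outside the transport layer, the same unwrapping is a short loop over the cause chain; a hypothetical stand-alone equivalent (not an Elasticsearch API):

-------------------------------------
final class Causes {

    // Walk the cause chain to its deepest element, guarding against
    // self-referential causes that would otherwise loop forever.
    static Throwable rootCause(Throwable t) {
        Throwable current = t;
        while (current.getCause() != null && current.getCause() != current) {
            current = current.getCause();
        }
        return current;
    }
}
-------------------------------------

Asserting `instanceOf(...)` and `startsWith(...)` on the result then mirrors the `handleException` callbacks above.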
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
index a5ab81d83fbcd..e262147ef85d0 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
@@ -102,9 +102,8 @@ public void testSort() {
             size = first.get().queryResult().size();
         }
         int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results));
-        ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(true, results.asList(), null, new SearchPhaseController.TopDocsStats(),
-            from, size)
-            .scoreDocs;
+        ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(true, results.asList(), null,
+            new SearchPhaseController.TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE), from, size).scoreDocs;
         for (Suggest.Suggestion suggestion : reducedSuggest(results)) {
             int suggestionSize = suggestion.getEntries().get(0).getOptions().size();
             accumulatedLength += suggestionSize;
@@ -126,12 +125,12 @@ public void testSortIsIdempotent() throws Exception {
             from = first.get().queryResult().from();
             size = first.get().queryResult().size();
         }
-        SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats();
+        SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE);
         ScoreDoc[] sortedDocs = SearchPhaseController.sortDocs(ignoreFrom, results.asList(), null, topDocsStats, from, size).scoreDocs;

         results = generateSeededQueryResults(randomSeed, nShards, Collections.emptyList(), queryResultSize, useConstantScore);
-        SearchPhaseController.TopDocsStats topDocsStats2 = new SearchPhaseController.TopDocsStats();
+        SearchPhaseController.TopDocsStats topDocsStats2 = new SearchPhaseController.TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE);
         ScoreDoc[] sortedDocs2 = SearchPhaseController.sortDocs(ignoreFrom, results.asList(), null, topDocsStats2, from, size).scoreDocs;
         assertEquals(sortedDocs.length, sortedDocs2.length);
         for (int i = 0; i < sortedDocs.length; i++) {
@@ -139,7 +138,7 @@ public void testSortIsIdempotent() throws Exception {
             assertEquals(sortedDocs[i].shardIndex, sortedDocs2[i].shardIndex);
             assertEquals(sortedDocs[i].score, sortedDocs2[i].score, 0.0f);
         }
-        assertEquals(topDocsStats.maxScore, topDocsStats2.maxScore, 0.0f);
+        assertEquals(topDocsStats.getMaxScore(), topDocsStats2.getMaxScore(), 0.0f);
         assertEquals(topDocsStats.getTotalHits().value, topDocsStats2.getTotalHits().value);
         assertEquals(topDocsStats.getTotalHits().relation, topDocsStats2.getTotalHits().relation);
         assertEquals(topDocsStats.fetchHits, topDocsStats2.fetchHits);
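The `TopDocsStats` constructor now takes a `trackTotalHitsUpTo` threshold, and the merger test added below expects per-cluster totals to be summed but capped at that threshold. A hedged sketch of the accumulation rule the test encodes (the helper is illustrative, not the production code):

-------------------------------------
import org.apache.lucene.search.TotalHits;

final class TotalHitsAccumulator {

    // Sum shard totals, but never report more than trackTotalHitsUpTo; the
    // relation carried by the shards (EQUAL_TO or GREATER_THAN_OR_EQUAL_TO)
    // is preserved on the accumulated value.
    static TotalHits accumulate(TotalHits soFar, TotalHits shardTotal, int trackTotalHitsUpTo) {
        long sum = (soFar == null ? 0 : soFar.value) + shardTotal.value;
        return new TotalHits(Math.min(sum, trackTotalHitsUpTo), shardTotal.relation);
    }
}
-------------------------------------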
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java
new file mode 100644
index 0000000000000..d02b712eaaef3
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java
@@ -0,0 +1,561 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TotalHits;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.OriginalIndices;
+import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.range.InternalDateRange;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.metrics.InternalMax;
+import org.elasticsearch.search.aggregations.metrics.Max;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.SearchProfileShardResults;
+import org.elasticsearch.search.profile.SearchProfileShardResultsTests;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.RemoteClusterAware;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class SearchResponseMergerTests extends ESTestCase {
+
+    private int numResponses;
+    private ExecutorService executorService;
+
+    @Before
+    public void init() {
+        numResponses = randomIntBetween(2, 10);
+        executorService = Executors.newFixedThreadPool(numResponses);
+    }
+
+    private void addResponse(SearchResponseMerger searchResponseMerger, SearchResponse searchResponse) {
+        if (randomBoolean()) {
+            executorService.submit(() -> searchResponseMerger.add(searchResponse));
+        } else {
+            searchResponseMerger.add(searchResponse);
+        }
+    }
+
+    private void awaitResponsesAdded() throws InterruptedException {
+        executorService.shutdown();
+        executorService.awaitTermination(5, TimeUnit.SECONDS);
+    }
+
+    public void testMergeTookInMillis() throws InterruptedException {
+        long currentRelativeTime = randomLong();
+        SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime);
+        SearchResponseMerger merger = new SearchResponseMerger(randomIntBetween(0, 1000), randomIntBetween(0, 10000),
+            SearchContext.TRACK_TOTAL_HITS_ACCURATE, timeProvider, flag -> null);
+        for (int i = 0; i < numResponses; i++) {
+            SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null, 1, 1, 0, randomLong(),
+                ShardSearchFailure.EMPTY_ARRAY, SearchResponseTests.randomClusters());
+            addResponse(merger, searchResponse);
+        }
+        awaitResponsesAdded();
+        SearchResponse searchResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY);
+        assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis());
+    }
+
+    public void testMergeShardFailures() throws InterruptedException {
+        SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0);
+        SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE,
+            searchTimeProvider, flag -> null);
+        PriorityQueue<Tuple<ShardId, ShardSearchFailure>> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1));
+        int numIndices = numResponses * randomIntBetween(1, 3);
+        Iterator<Map.Entry<String, Index[]>> indicesPerCluster = randomRealisticIndices(numIndices, numResponses).entrySet().iterator();
+        for (int i = 0; i < numResponses; i++) {
+            Map.Entry<String, Index[]> entry = indicesPerCluster.next();
+            String clusterAlias = entry.getKey();
+            Index[] indices = entry.getValue();
+            int numFailures = randomIntBetween(1, 10);
+            ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures];
+            for (int j = 0; j < numFailures; j++) {
+                ShardId shardId = new ShardId(randomFrom(indices), j);
+                ShardSearchFailure failure;
+                if (randomBoolean()) {
+                    SearchShardTarget searchShardTarget = new SearchShardTarget(randomAlphaOfLength(6), shardId, clusterAlias, null);
+                    failure = new ShardSearchFailure(new IllegalArgumentException(), searchShardTarget);
+                } else {
+                    ElasticsearchException elasticsearchException = new ElasticsearchException(new IllegalArgumentException());
+                    elasticsearchException.setShard(shardId);
+                    failure = new ShardSearchFailure(elasticsearchException);
+                }
+                shardSearchFailures[j] = failure;
+                priorityQueue.add(Tuple.tuple(shardId, failure));
+            }
+            SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null,
+                1, 1, 0, 100L, shardSearchFailures, SearchResponse.Clusters.EMPTY);
+            addResponse(merger, searchResponse);
+        }
+        awaitResponsesAdded();
+        SearchResponse.Clusters clusters = SearchResponseTests.randomClusters();
+        SearchResponse mergedResponse = merger.getMergedResponse(clusters);
+        assertSame(clusters, mergedResponse.getClusters());
+        assertEquals(numResponses, mergedResponse.getTotalShards());
+        assertEquals(numResponses, mergedResponse.getSuccessfulShards());
+        assertEquals(0, mergedResponse.getSkippedShards());
+        assertEquals(priorityQueue.size(), mergedResponse.getFailedShards());
+        ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures();
+        assertEquals(priorityQueue.size(), shardFailures.length);
+        for (ShardSearchFailure shardFailure : shardFailures) {
+            ShardSearchFailure expected = priorityQueue.poll().v2();
+            assertSame(expected, shardFailure);
+        }
+    }
+
+    public void testMergeShardFailuresNullShardId() throws InterruptedException {
+        SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0);
+        SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE,
+            searchTimeProvider, flag -> null);
+        List<ShardSearchFailure> expectedFailures = new ArrayList<>();
+        for (int i = 0; i < numResponses; i++) {
+            int numFailures = randomIntBetween(1, 50);
+            ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures];
+            for (int j = 0; j < numFailures; j++) {
+                ShardSearchFailure shardSearchFailure = new ShardSearchFailure(new ElasticsearchException(new IllegalArgumentException()));
+                shardSearchFailures[j] = shardSearchFailure;
+                expectedFailures.add(shardSearchFailure);
+            }
+            SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null,
+                1, 1, 0, 100L, shardSearchFailures, SearchResponse.Clusters.EMPTY);
+            addResponse(merger, searchResponse);
+        }
+        awaitResponsesAdded();
+        ShardSearchFailure[] shardFailures = merger.getMergedResponse(SearchResponse.Clusters.EMPTY).getShardFailures();
+        assertThat(Arrays.asList(shardFailures), containsInAnyOrder(expectedFailures.toArray(ShardSearchFailure.EMPTY_ARRAY)));
+    }
+
+    public void testMergeProfileResults() throws InterruptedException {
+        SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0);
+        SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE,
+            searchTimeProvider, flag -> null);
+        Map<String, ProfileShardResult> expectedProfile = new HashMap<>();
+        for (int i = 0; i < numResponses; i++) {
+            SearchProfileShardResults profile = SearchProfileShardResultsTests.createTestItem();
+            expectedProfile.putAll(profile.getShardResults());
+            SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN);
+            InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, profile, false, null, 1);
+            SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, 100L,
+                ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY);
+            addResponse(merger, searchResponse);
+        }
+        awaitResponsesAdded();
+        SearchResponse.Clusters clusters = SearchResponseTests.randomClusters();
+        SearchResponse mergedResponse = merger.getMergedResponse(clusters);
+        assertSame(clusters, mergedResponse.getClusters());
+        assertEquals(numResponses, mergedResponse.getTotalShards());
+        assertEquals(numResponses, mergedResponse.getSuccessfulShards());
+        assertEquals(0, mergedResponse.getSkippedShards());
+        assertEquals(0, mergedResponse.getFailedShards());
+        assertEquals(0, mergedResponse.getShardFailures().length);
+        assertEquals(expectedProfile, mergedResponse.getProfileResults());
+    }
+
+    public void testMergeSuggestions() throws InterruptedException {
+        String suggestionName = randomAlphaOfLengthBetween(4, 8);
+        boolean skipDuplicates = randomBoolean();
+        int size = randomIntBetween(1, 100);
+        SearchResponseMerger searchResponseMerger = new SearchResponseMerger(0, 0, 0, new SearchTimeProvider(0, 0, () -> 0), flag -> null);
+        for (int i = 0; i < numResponses; i++) {
+            List<Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>> suggestions =
+                new ArrayList<>();
+            CompletionSuggestion completionSuggestion = new CompletionSuggestion(suggestionName, size, skipDuplicates);
+            CompletionSuggestion.Entry options = new CompletionSuggestion.Entry(new Text("suggest"), 0, 10);
+            options.addOption(new CompletionSuggestion.Entry.Option(randomInt(), new Text("suggestion"), i, Collections.emptyMap()));
+            completionSuggestion.addTerm(options);
+            suggestions.add(completionSuggestion);
+            Suggest suggest = new Suggest(suggestions);
+            SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN);
+            InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1);
+            SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, randomLong(),
+                ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY);
+            addResponse(searchResponseMerger, searchResponse);
+        }
+        awaitResponsesAdded();
+        SearchResponse.Clusters clusters = SearchResponseTests.randomClusters();
+        SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters);
+        assertSame(clusters, mergedResponse.getClusters());
+        assertEquals(numResponses, mergedResponse.getTotalShards());
+        assertEquals(numResponses, mergedResponse.getSuccessfulShards());
+        assertEquals(0, mergedResponse.getSkippedShards());
+        assertEquals(0, mergedResponse.getFailedShards());
+        assertEquals(0, mergedResponse.getShardFailures().length);
+        Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestion =
+            mergedResponse.getSuggest().getSuggestion(suggestionName);
+        assertEquals(1, suggestion.getEntries().size());
+        Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option> options = suggestion.getEntries().get(0);
+        assertEquals(skipDuplicates ? 1 : Math.min(numResponses, size), options.getOptions().size());
+        int i = numResponses;
+        for (Suggest.Suggestion.Entry.Option option : options) {
+            assertEquals("suggestion", option.getText().string());
+            assertEquals(--i, option.getScore(), 0f);
+        }
+    }
+
+    public void testMergeAggs() throws InterruptedException {
+        SearchResponseMerger searchResponseMerger = new SearchResponseMerger(0, 0, 0, new SearchTimeProvider(0, 0, () -> 0),
+            flag -> new InternalAggregation.ReduceContext(null, null, flag));
+        String maxAggName = randomAlphaOfLengthBetween(5, 8);
+        String rangeAggName = randomAlphaOfLengthBetween(5, 8);
+        int totalCount = 0;
+        double maxValue = Double.MIN_VALUE;
+        for (int i = 0; i < numResponses; i++) {
+            double value = randomDouble();
+            maxValue = Math.max(value, maxValue);
+            InternalMax max = new InternalMax(maxAggName, value, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap());
+            InternalDateRange.Factory factory = new InternalDateRange.Factory();
+            int count = randomIntBetween(1, 1000);
+            totalCount += count;
+            InternalDateRange.Bucket bucket = factory.createBucket("bucket", 0, 10000, count, InternalAggregations.EMPTY,
+                false, DocValueFormat.RAW);
+            InternalDateRange range = factory.create(rangeAggName, Collections.singletonList(bucket), DocValueFormat.RAW, false,
+                Collections.emptyList(), Collections.emptyMap());
+            InternalAggregations aggs = new InternalAggregations(Arrays.asList(range, max));
+            SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN);
+            InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1);
+            SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, 1, 1, 0, randomLong(),
+                ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY);
+            addResponse(searchResponseMerger, searchResponse);
+        }
+        awaitResponsesAdded();
+        SearchResponse.Clusters clusters = SearchResponseTests.randomClusters();
+        SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters);
+        assertSame(clusters, mergedResponse.getClusters());
+        assertEquals(numResponses, mergedResponse.getTotalShards());
+        assertEquals(numResponses, mergedResponse.getSuccessfulShards());
+        assertEquals(0, mergedResponse.getSkippedShards());
+        assertEquals(0, mergedResponse.getFailedShards());
+        assertEquals(0, mergedResponse.getShardFailures().length);
+        assertEquals(0, mergedResponse.getHits().getHits().length);
+        assertEquals(2, mergedResponse.getAggregations().asList().size());
+        Max max = mergedResponse.getAggregations().get(maxAggName);
+        assertEquals(maxValue, max.getValue(), 0d);
+        Range range = mergedResponse.getAggregations().get(rangeAggName);
+        assertEquals(1, range.getBuckets().size());
+        Range.Bucket bucket = range.getBuckets().get(0);
+        assertEquals("0.0", bucket.getFromAsString());
+        assertEquals("10000.0", bucket.getToAsString());
+        assertEquals(totalCount, bucket.getDocCount());
+    }
+
+    public void testMergeSearchHits() throws InterruptedException {
+        final long currentRelativeTime = randomLong();
+        final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime);
+        final int size = randomIntBetween(0, 100);
+        final int from = size > 0 ? randomIntBetween(0, 100) : 0;
+        final int requestedSize = from + size;
+        final SortField[] sortFields;
+        final String collapseField;
+        boolean scoreSort = false;
+        if (randomBoolean()) {
+            int numFields = randomIntBetween(1, 3);
+            sortFields = new SortField[numFields];
+            for (int i = 0; i < numFields; i++) {
+                final SortField sortField;
+                if (randomBoolean()) {
+                    sortField = new SortField("field-" + i, SortField.Type.INT, randomBoolean());
+                } else {
+                    scoreSort = true;
+                    sortField = SortField.FIELD_SCORE;
+                }
+                sortFields[i] = sortField;
+            }
+            collapseField = randomBoolean() ? "collapse" : null;
+        } else {
+            collapseField = null;
+            sortFields = null;
+            scoreSort = true;
+        }
+        Tuple<Integer, TotalHits.Relation> randomTrackTotalHits = randomTrackTotalHits();
+        int trackTotalHitsUpTo = randomTrackTotalHits.v1();
+        TotalHits.Relation totalHitsRelation = randomTrackTotalHits.v2();
+
+        PriorityQueue<SearchHit> priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields));
+        SearchResponseMerger searchResponseMerger = new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, flag -> null);
+
+        TotalHits expectedTotalHits = null;
+        int expectedTotal = 0;
+        int expectedSuccessful = 0;
+        int expectedSkipped = 0;
+        int expectedReducePhases = 1;
+        boolean expectedTimedOut = false;
+        Boolean expectedTerminatedEarly = null;
+        float expectedMaxScore = Float.NEGATIVE_INFINITY;
+        int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize);
+        Iterator<Map.Entry<String, Index[]>> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator();
+        for (int i = 0; i < numResponses; i++) {
+            Map.Entry<String, Index[]> entry = indicesIterator.next();
+            String clusterAlias = entry.getKey().equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) ? null : entry.getKey();
+            Index[] indices = entry.getValue();
+            int total = randomIntBetween(1, 1000);
+            expectedTotal += total;
+            int successful = randomIntBetween(1, total);
+            expectedSuccessful += successful;
+            int skipped = randomIntBetween(1, total);
+            expectedSkipped += skipped;
+
+            TotalHits totalHits = null;
+            if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) {
+                totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation);
+                long previousValue = expectedTotalHits == null ? 0 : expectedTotalHits.value;
+                expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation);
+            }
+
+            final int numDocs = totalHits == null || totalHits.value >= requestedSize ? requestedSize : (int) totalHits.value;
+            int scoreFactor = randomIntBetween(1, numResponses);
+            float maxScore = scoreSort ? numDocs * scoreFactor : Float.NaN;
+            SearchHit[] hits = randomSearchHitArray(numDocs, numResponses, clusterAlias, indices, maxScore, scoreFactor,
+                sortFields, priorityQueue);
+            expectedMaxScore = Math.max(expectedMaxScore, maxScore);
+
+            Object[] collapseValues = null;
+            if (collapseField != null) {
+                collapseValues = new Object[numDocs];
+                for (int j = 0; j < numDocs; j++) {
+                    //set different collapse values for each cluster for simplicity
+                    collapseValues[j] = j + 1000 * i;
+                }
+            }
+
+            SearchHits searchHits = new SearchHits(hits, totalHits, maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore,
+                sortFields, collapseField, collapseValues);
+
+            int numReducePhases = randomIntBetween(1, 5);
+            expectedReducePhases += numReducePhases;
+            boolean timedOut = rarely();
+            expectedTimedOut = expectedTimedOut || timedOut;
+            Boolean terminatedEarly = frequently() ? null : true;
+            expectedTerminatedEarly = expectedTerminatedEarly == null ? terminatedEarly : expectedTerminatedEarly;
+
+            InternalSearchResponse internalSearchResponse = new InternalSearchResponse(
+                searchHits, null, null, null, timedOut, terminatedEarly, numReducePhases);
+
+            SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, total, successful, skipped,
+                randomLong(), ShardSearchFailure.EMPTY_ARRAY, SearchResponseTests.randomClusters());
+
+            addResponse(searchResponseMerger, searchResponse);
+        }
+
+        awaitResponsesAdded();
+
+        final SearchResponse.Clusters clusters = SearchResponseTests.randomClusters();
+        SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters);
+
+        assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis());
+        assertEquals(expectedTotal, searchResponse.getTotalShards());
+        assertEquals(expectedSuccessful, searchResponse.getSuccessfulShards());
+        assertEquals(expectedSkipped, searchResponse.getSkippedShards());
+        assertEquals(0, searchResponse.getShardFailures().length);
+        assertEquals(expectedReducePhases, searchResponse.getNumReducePhases());
+        assertEquals(expectedTimedOut, searchResponse.isTimedOut());
+        assertEquals(expectedTerminatedEarly, searchResponse.isTerminatedEarly());
+
+        assertSame(clusters, searchResponse.getClusters());
+        assertNull(searchResponse.getScrollId());
+
+        SearchHits searchHits = searchResponse.getHits();
+        assertArrayEquals(sortFields, searchHits.getSortFields());
+        assertEquals(collapseField, searchHits.getCollapseField());
+        if (expectedTotalHits == null) {
+            assertNull(searchHits.getTotalHits());
+        } else {
+            assertNotNull(searchHits.getTotalHits());
+            assertEquals(expectedTotalHits.value, searchHits.getTotalHits().value);
+            assertSame(expectedTotalHits.relation, searchHits.getTotalHits().relation);
+        }
+        if (expectedMaxScore == Float.NEGATIVE_INFINITY) {
+            assertTrue(Float.isNaN(searchHits.getMaxScore()));
+        } else {
+            assertEquals(expectedMaxScore, searchHits.getMaxScore(), 0f);
+        }
+
+        for (int i = 0; i < from; i++) {
+            priorityQueue.poll();
+        }
+        SearchHit[] hits = searchHits.getHits();
+        if (collapseField != null) {
+            assertEquals(hits.length, searchHits.getCollapseValues().length);
+        } else {
+            assertNull(searchHits.getCollapseValues());
+        }
+        assertThat(hits.length, lessThanOrEqualTo(size));
+        for (SearchHit hit : hits) {
+            SearchHit expected = priorityQueue.poll();
+            assertSame(expected, hit);
+        }
+    }
+
+    private static Tuple<Integer, TotalHits.Relation> randomTrackTotalHits() {
+        switch(randomIntBetween(0, 2)) {
+            case 0:
+                return Tuple.tuple(SearchContext.TRACK_TOTAL_HITS_DISABLED, null);
+            case 1:
+                return Tuple.tuple(randomIntBetween(10, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+            case 2:
+                return Tuple.tuple(SearchContext.TRACK_TOTAL_HITS_ACCURATE, TotalHits.Relation.EQUAL_TO);
+            default:
+                throw new UnsupportedOperationException();
+        }
+    }
+
+    private static SearchHit[] randomSearchHitArray(int numDocs, int numResponses, String clusterAlias, Index[] indices, float maxScore,
+                                                    int scoreFactor, SortField[] sortFields, PriorityQueue<SearchHit> priorityQueue) {
+        SearchHit[] hits = new SearchHit[numDocs];
+
+        int[] sortFieldFactors = new int[sortFields == null ? 0 : sortFields.length];
+        for (int j = 0; j < sortFieldFactors.length; j++) {
+            sortFieldFactors[j] = randomIntBetween(1, numResponses);
+        }
+
+        for (int j = 0; j < numDocs; j++) {
+            ShardId shardId = new ShardId(randomFrom(indices), randomIntBetween(0, 10));
+            SearchShardTarget shardTarget = new SearchShardTarget(randomAlphaOfLengthBetween(3, 8), shardId,
+                clusterAlias, OriginalIndices.NONE);
+            SearchHit hit = new SearchHit(randomIntBetween(0, Integer.MAX_VALUE));
+
+            float score = Float.NaN;
+            if (Float.isNaN(maxScore) == false) {
+                score = (maxScore - j) * scoreFactor;
+                hit.score(score);
+            }
+
+            hit.shard(shardTarget);
+            if (sortFields != null) {
+                Object[] rawSortValues = new Object[sortFields.length];
+                DocValueFormat[] docValueFormats = new DocValueFormat[sortFields.length];
+                for (int k = 0; k < sortFields.length; k++) {
+                    SortField sortField = sortFields[k];
+                    if (sortField == SortField.FIELD_SCORE) {
+                        hit.score(score);
+                        rawSortValues[k] = score;
+                    } else {
+                        rawSortValues[k] = sortField.getReverse() ? numDocs * sortFieldFactors[k] - j : j;
+                    }
+                    docValueFormats[k] = DocValueFormat.RAW;
+                }
+                hit.sortValues(rawSortValues, docValueFormats);
+            }
+            hits[j] = hit;
+            priorityQueue.add(hit);
+        }
+        return hits;
+    }
+
+    private static Map<String, Index[]> randomRealisticIndices(int numIndices, int numClusters) {
+        String[] indicesNames = new String[numIndices];
+        for (int i = 0; i < numIndices; i++) {
+            indicesNames[i] = randomAlphaOfLengthBetween(5, 10);
+        }
+        Map<String, Index[]> indicesPerCluster = new TreeMap<>();
+        for (int i = 0; i < numClusters; i++) {
+            Index[] indices = new Index[indicesNames.length];
+            for (int j = 0; j < indices.length; j++) {
+                //Realistically clusters have the same indices with same names, but different uuid
+                indices[j] = new Index(indicesNames[j], randomAlphaOfLength(10));
+            }
+            String clusterAlias;
+            if (frequently() || indicesPerCluster.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) {
+                clusterAlias = randomAlphaOfLengthBetween(5, 10);
+            } else {
+                clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY;
+            }
+            indicesPerCluster.put(clusterAlias, indices);
+        }
+        return indicesPerCluster;
+    }
+
+    private static final class SearchHitComparator implements Comparator<SearchHit> {
+
+        private final SortField[] sortFields;
+
+        SearchHitComparator(SortField[] sortFields) {
+            this.sortFields = sortFields;
+        }
+
+        @Override
+        public int compare(SearchHit a, SearchHit b) {
+            if (sortFields == null) {
+                int scoreCompare = Float.compare(b.getScore(), a.getScore());
+                if (scoreCompare != 0) {
+                    return scoreCompare;
+                }
+            } else {
+                for (int i = 0; i < sortFields.length; i++) {
+                    SortField sortField = sortFields[i];
+                    if (sortField == SortField.FIELD_SCORE) {
+                        int scoreCompare = Float.compare(b.getScore(), a.getScore());
+                        if (scoreCompare != 0) {
+                            return scoreCompare;
+                        }
+                    } else {
+                        Integer aSortValue = (Integer) a.getRawSortValues()[i];
+                        Integer bSortValue = (Integer) b.getRawSortValues()[i];
+                        final int compare;
+                        if (sortField.getReverse()) {
+                            compare = Integer.compare(bSortValue, aSortValue);
+                        } else {
+                            compare = Integer.compare(aSortValue, bSortValue);
+                        }
+                        if (compare != 0) {
+                            return compare;
+                        }
+                    }
+                }
+            }
+            int shardIdCompareTo = a.getShard().getShardId().compareTo(b.getShard().getShardId());
+            if (shardIdCompareTo != 0) {
+                return shardIdCompareTo;
+            }
+            return Integer.compare(a.docId(), b.docId());
+        }
+    }
+}
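The expected ordering in `testMergeSearchHits` is computed by pushing every generated hit into a single `PriorityQueue` ordered by `SearchHitComparator` and then popping entries `from` through `from + size`. Stripped of Elasticsearch types, the k-way merge the test models looks roughly like this (a generic stand-in, not the production merger):

-------------------------------------
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

final class KWayMerge {

    // Merge pre-sorted per-cluster result lists: heap everything,
    // skip the 'from' offset, then take up to 'size' elements.
    static <T> List<T> mergePage(List<List<T>> perCluster, Comparator<T> order, int from, int size) {
        PriorityQueue<T> heap = new PriorityQueue<>(order);
        perCluster.forEach(heap::addAll);
        for (int i = 0; i < from && heap.isEmpty() == false; i++) {
            heap.poll(); // discard the pagination offset, as the test does before asserting
        }
        List<T> page = new ArrayList<>();
        while (page.size() < size && heap.isEmpty() == false) {
            page.add(heap.poll());
        }
        return page;
    }
}
-------------------------------------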
index ecd2dc44a702e..f07be38765f66 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -128,7 +128,7 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha shardSearchFailures, randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY); } - private static SearchResponse.Clusters randomClusters() { + static SearchResponse.Clusters randomClusters() { int totalClusters = randomIntBetween(0, 10); int successfulClusters = randomIntBetween(0, totalClusters); int skippedClusters = totalClusters - successfulClusters; diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index 542247c058861..46a43afa53897 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -20,49 +20,52 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import org.junit.Before; -import java.util.Set; +import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING; import static org.elasticsearch.common.settings.Settings.builder; import static 
org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING;
 import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING;
 import static org.elasticsearch.node.Node.NODE_NAME_SETTING;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;

 public class ClusterBootstrapServiceTests extends ESTestCase {

     private DiscoveryNode localNode, otherNode1, otherNode2;
     private DeterministicTaskQueue deterministicTaskQueue;
     private TransportService transportService;
-    private ClusterBootstrapService clusterBootstrapService;

     @Before
     public void createServices() {
@@ -81,10 +84,6 @@ protected void onSendRequest(long requestId, String action, TransportRequest req
         transportService = transport.createTransportService(Settings.EMPTY, deterministicTaskQueue.getThreadPool(),
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, emptySet());
-
-        clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(),
-            localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
-            transportService);
     }

     private DiscoveryNode newDiscoveryNode(String nodeName) {
@@ -92,152 +91,392 @@ private DiscoveryNode newDiscoveryNode(String nodeName) {
             Version.CURRENT);
     }

-    private void startServices() {
-        transportService.start();
-        transportService.acceptIncomingRequests();
-        clusterBootstrapService.start();
-    }
+    public void testBootstrapsAutomaticallyWithDefaultConfiguration() {
+        final Settings.Builder settings = Settings.builder();
+        final long timeout;
+        if (randomBoolean()) {
+            timeout = UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(Settings.EMPTY).millis();
+        } else {
+            timeout = randomLongBetween(1, 10000);
+            settings.put(UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.getKey(), timeout + "ms");
+        }

-    public void testDoesNothingOnNonMasterNodes() {
-        localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            (request, channel, task) -> {
-                throw new AssertionError("should not make a discovery request");
-            });
+        final AtomicReference<Supplier<Set<DiscoveryNode>>> discoveredNodesSupplier = new AtomicReference<>(() -> {
+            throw new AssertionError("should not be called yet");
+        });

-        startServices();
-        deterministicTaskQueue.runAllTasks();
+        final AtomicBoolean bootstrapped = new AtomicBoolean();
+        ClusterBootstrapService clusterBootstrapService
+            = new ClusterBootstrapService(settings.build(), transportService, () -> discoveredNodesSupplier.get().get(),
+            () -> false, vc -> {
+                assertTrue(bootstrapped.compareAndSet(false, true));
+                assertThat(vc.getNodeIds(),
+                    equalTo(Stream.of(localNode, otherNode1, otherNode2).map(DiscoveryNode::getId).collect(Collectors.toSet())));
+                assertThat(deterministicTaskQueue.getCurrentTimeMillis(), greaterThanOrEqualTo(timeout));
+            });
+
+        deterministicTaskQueue.scheduleAt(timeout - 1,
+            () -> discoveredNodesSupplier.set(() -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet())));
+
+        transportService.start();
+        clusterBootstrapService.scheduleUnconfiguredBootstrap();
+        deterministicTaskQueue.runAllTasksInTimeOrder();
+        assertTrue(bootstrapped.get());
     }

     public void testDoesNothingByDefaultIfHostsProviderConfigured() {
-        testConfiguredIfSettingSet(builder().putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey()));
+        testDoesNothingWithSettings(builder().putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey()));
     }

     public void testDoesNothingByDefaultIfUnicastHostsConfigured() {
-        testConfiguredIfSettingSet(builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()));
+        testDoesNothingWithSettings(builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey()));
     }

     public void testDoesNothingByDefaultIfMasterNodesConfigured() {
-        testConfiguredIfSettingSet(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey()));
+        testDoesNothingWithSettings(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey()));
     }

-    private void testConfiguredIfSettingSet(Builder builder) {
-        clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService);
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            (request, channel, task) -> {
-                throw new AssertionError("should not make a discovery request");
-            });
-        startServices();
+    public void testDoesNothingByDefaultOnMasterIneligibleNodes() {
+        localNode = new DiscoveryNode("local", randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(),
+            Version.CURRENT);
+        testDoesNothingWithSettings(Settings.builder());
+    }
+
+    private void testDoesNothingWithSettings(Settings.Builder builder) {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService, () -> {
+            throw new AssertionError("should not be called");
+        }, () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+        transportService.start();
+        clusterBootstrapService.scheduleUnconfiguredBootstrap();
         deterministicTaskQueue.runAllTasks();
     }

-    public void testBootstrapsAutomaticallyWithDefaultConfiguration() {
-        clusterBootstrapService = new ClusterBootstrapService(Settings.EMPTY, transportService);
+    public void testDoesNothingByDefaultIfZen1NodesDiscovered() {
+        final DiscoveryNode zen1Node = new DiscoveryNode("zen1", buildNewFakeTransportAddress(), singletonMap("zen1", "true"),
+            singleton(Role.MASTER), Version.CURRENT);
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.EMPTY, transportService, () ->
+            Stream.of(localNode, zen1Node).collect(Collectors.toSet()), () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+        transportService.start();
+        clusterBootstrapService.scheduleUnconfiguredBootstrap();
+        deterministicTaskQueue.runAllTasks();
+    }
+
+
+    public void testThrowsExceptionOnDuplicates() {
+        final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> {
+            new ClusterBootstrapService(builder().putList(
+                INITIAL_MASTER_NODES_SETTING.getKey(), "duplicate-requirement", "duplicate-requirement").build(),
+                transportService, Collections::emptyList, () -> false, vc -> {
+                throw new AssertionError("should not be called");
+            });
+        });

-        final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet());
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes)));
+        assertThat(illegalArgumentException.getMessage(), containsString(INITIAL_MASTER_NODES_SETTING.getKey()));
+        assertThat(illegalArgumentException.getMessage(), containsString("duplicate-requirement"));
+    }

+    public void testBootstrapsOnDiscoveryOfAllRequiredNodes() {
         final AtomicBoolean bootstrapped = new AtomicBoolean();
-        transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new,
-            (request, channel, task) -> {
-                assertThat(request.getBootstrapConfiguration().getNodeDescriptions().stream()
-                        .map(NodeDescription::getId).collect(Collectors.toSet()),
-                    equalTo(discoveredNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())));
-                channel.sendResponse(new BootstrapClusterResponse(randomBoolean()));
-                assertTrue(bootstrapped.compareAndSet(false, true));
-            });
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> {
+            assertTrue(bootstrapped.compareAndSet(false, true));
+            assertThat(vc.getNodeIds(), containsInAnyOrder(localNode.getId(), otherNode1.getId(), otherNode2.getId()));
+            assertThat(vc.getNodeIds(), not(hasItem(containsString("placeholder"))));
+        });

-        startServices();
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
         deterministicTaskQueue.runAllTasks();
+        assertTrue(bootstrapped.get());
+        bootstrapped.set(false);
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertFalse(bootstrapped.get()); // should only bootstrap once
+    }
+
+    public void testBootstrapsOnDiscoveryOfTwoOfThreeRequiredNodes() {
+        final AtomicBoolean bootstrapped = new AtomicBoolean();
+
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, () -> singletonList(otherNode1), () -> false, vc -> {
+            assertTrue(bootstrapped.compareAndSet(false, true));
+            assertThat(vc.getNodeIds(), hasSize(3));
+            assertThat(vc.getNodeIds(), hasItem(localNode.getId()));
+            assertThat(vc.getNodeIds(), hasItem(otherNode1.getId()));
+            assertThat(vc.getNodeIds(), hasItem(allOf(startsWith(BOOTSTRAP_PLACEHOLDER_PREFIX), containsString(otherNode2.getName()))));
+            assertTrue(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList())));
+            assertFalse(vc.hasQuorum(singletonList(localNode.getId())));
+            assertFalse(vc.hasQuorum(singletonList(otherNode1.getId())));
+        });
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
         assertTrue(bootstrapped.get());
+
+        bootstrapped.set(false);
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertFalse(bootstrapped.get()); // should only bootstrap once
     }

-    public void testDoesNotRetryOnDiscoveryFailure() {
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            new TransportRequestHandler<GetDiscoveredNodesRequest>() {
-                private boolean called = false;
+    public void testBootstrapsOnDiscoveryOfThreeOfFiveRequiredNodes() {
+        final AtomicBoolean bootstrapped = new AtomicBoolean();
-                @Override
-                public void messageReceived(GetDiscoveredNodesRequest request, TransportChannel channel, Task task) {
-                    assert called == false;
-                    called = true;
-                    throw new IllegalArgumentException("simulate failure of discovery request");
-                }
-            });
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName(),
+            "missing-node-1", "missing-node-2").build(),
+            transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> {
+            assertTrue(bootstrapped.compareAndSet(false, true));
+            assertThat(vc.getNodeIds(), hasSize(5));
+            assertThat(vc.getNodeIds(), hasItem(localNode.getId()));
+            assertThat(vc.getNodeIds(), hasItem(otherNode1.getId()));
+            assertThat(vc.getNodeIds(), hasItem(otherNode2.getId()));
+
+            final List<String> placeholders
+                = vc.getNodeIds().stream().filter(ClusterBootstrapService::isBootstrapPlaceholder).collect(Collectors.toList());
+            assertThat(placeholders.size(), equalTo(2));
+            assertNotEquals(placeholders.get(0), placeholders.get(1));
+            assertThat(placeholders, hasItem(containsString("missing-node-1")));
+            assertThat(placeholders, hasItem(containsString("missing-node-2")));
+
+            assertTrue(vc.hasQuorum(Stream.of(localNode, otherNode1, otherNode2).map(DiscoveryNode::getId).collect(Collectors.toList())));
+            assertFalse(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList())));
+        });

-        startServices();
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
         deterministicTaskQueue.runAllTasks();
+        assertTrue(bootstrapped.get());
+
+        bootstrapped.set(false);
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertFalse(bootstrapped.get()); // should only bootstrap once
     }

-    public void testBootstrapsOnDiscoverySuccess() {
-        final AtomicBoolean discoveryAttempted = new AtomicBoolean();
-        final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet());
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            (request, channel, task) -> {
-                assertTrue(discoveryAttempted.compareAndSet(false, true));
-                channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes));
-            });
+    public void testDoesNotBootstrapIfNoNodesDiscovered() {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, Collections::emptyList, () -> true, vc -> {
+            throw new AssertionError("should not be called");
+        });

-        final AtomicBoolean bootstrapAttempted = new AtomicBoolean();
-        transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new,
-            (request, channel, task) -> {
-                assertTrue(bootstrapAttempted.compareAndSet(false, true));
-                channel.sendResponse(new BootstrapClusterResponse(false));
-            });
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
     }

-        startServices();
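The placeholder assertions above rely on one property: placeholders enlarge the voting configuration, and hence the majority threshold, without ever being able to cast a vote. A minimal, hypothetical sketch of that quorum rule follows (this is not the production `VotingConfiguration` class, and the placeholder prefix value shown is an assumption):

-------------------------------------
// Hypothetical stand-in for a hasQuorum check: a quorum is a strict
// majority of the configured ids, and placeholder ids can never appear
// among the joined nodes, so real nodes must supply the whole majority.
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

final class QuorumSketch {
    static boolean hasQuorum(Set<String> configuredNodeIds, Collection<String> joinedNodeIds) {
        long votes = joinedNodeIds.stream().filter(configuredNodeIds::contains).count();
        return votes * 2 > configuredNodeIds.size(); // strict majority
    }

    public static void main(String[] args) {
        // "{bootstrap-placeholder}-" is an assumed prefix value, for illustration only.
        Set<String> config = new HashSet<>(Arrays.asList("n1", "n2", "{bootstrap-placeholder}-n3"));
        System.out.println(hasQuorum(config, Arrays.asList("n1", "n2"))); // true: 2 of 3
        System.out.println(hasQuorum(config, Arrays.asList("n1")));       // false: 1 of 3
    }
}
-------------------------------------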
+    public void testDoesNotBootstrapIfTwoOfFiveNodesDiscovered() {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(),
+            localNode.getName(), otherNode1.getName(), otherNode2.getName(), "not-a-node-1", "not-a-node-2").build(),
+            transportService, () -> Stream.of(otherNode1).collect(Collectors.toList()), () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
         deterministicTaskQueue.runAllTasks();
+    }
+
+    public void testDoesNotBootstrapIfThreeOfSixNodesDiscovered() {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(),
+            localNode.getName(), otherNode1.getName(), otherNode2.getName(), "not-a-node-1", "not-a-node-2", "not-a-node-3").build(),
+            transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
-        assertTrue(discoveryAttempted.get());
-        assertTrue(bootstrapAttempted.get());
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
     }

-    public void testRetriesOnBootstrapFailure() {
-        final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet());
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes)));
+    public void testDoesNotBootstrapIfAlreadyBootstrapped() {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> true, vc -> {
+            throw new AssertionError("should not be called");
+        });

-        AtomicLong callCount = new AtomicLong();
-        transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new,
-            (request, channel, task) -> {
-                callCount.incrementAndGet();
-                channel.sendResponse(new ElasticsearchException("simulated exception"));
-            });
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+    }

-        startServices();
-        while (callCount.get() < 5) {
-            if (deterministicTaskQueue.hasDeferredTasks()) {
-                deterministicTaskQueue.advanceTime();
-            }
-            deterministicTaskQueue.runAllRunnableTasks();
-        }
+    public void testDoesNotBootstrapsOnNonMasterNode() {
+        localNode = new DiscoveryNode("local", randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(),
+            Version.CURRENT);
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, () ->
+            Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+    }
+
+    public void testDoesNotBootstrapsIfNotConfigured() {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey()).build(), transportService,
+            () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+        transportService.start();
+        clusterBootstrapService.scheduleUnconfiguredBootstrap();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
     }

-    public void testStopsRetryingBootstrapWhenStopped() {
-        final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet());
-        transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new,
-            (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes)));
+    public void testDoesNotBootstrapsIfZen1NodesDiscovered() {
+        final DiscoveryNode zen1Node = new DiscoveryNode("zen1", buildNewFakeTransportAddress(), singletonMap("zen1", "true"),
+            singleton(Role.MASTER), Version.CURRENT);

-        transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new,
-            (request, channel, task) -> channel.sendResponse(new ElasticsearchException("simulated exception")));
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, () -> Stream.of(otherNode1, otherNode2, zen1Node).collect(Collectors.toList()), () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });

-        deterministicTaskQueue.scheduleAt(deterministicTaskQueue.getCurrentTimeMillis() + 200000, new Runnable() {
-            @Override
-            public void run() {
-                clusterBootstrapService.stop();
-            }
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+    }

-            @Override
-            public String toString() {
-                return "stop cluster bootstrap service";
+    public void testRetriesBootstrappingOnException() {
+
+        final AtomicLong bootstrappingAttempts = new AtomicLong();
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(
+            INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(),
+            transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> {
+            bootstrappingAttempts.incrementAndGet();
+            if (bootstrappingAttempts.get() < 5L) {
+                throw new ElasticsearchException("test");
             }
         });

-        startServices();
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertThat(bootstrappingAttempts.get(), greaterThanOrEqualTo(5L));
+        assertThat(deterministicTaskQueue.getCurrentTimeMillis(), greaterThanOrEqualTo(40000L));
+    }
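`testRetriesBootstrappingOnException` above only pins down observable behaviour: at least five attempts and at least 40 seconds of simulated time before a successful bootstrap. A hedged sketch of such a retry loop follows; the 10-second delay is inferred from those two numbers, not a documented constant:

-------------------------------------
// Sketch: a failed bootstrap attempt is rescheduled rather than abandoned.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class RetryingBootstrapSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    void tryBootstrap(Runnable bootstrap) {
        try {
            bootstrap.run();
        } catch (Exception e) {
            // retry later instead of failing the node permanently
            scheduler.schedule(() -> tryBootstrap(bootstrap), 10, TimeUnit.SECONDS);
        }
    }
}
-------------------------------------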
+
+    public void testCancelsBootstrapIfRequirementMatchesMultipleNodes() {
+        AtomicReference<List<DiscoveryNode>> discoveredNodes
+            = new AtomicReference<>(Stream.of(otherNode1, otherNode2).collect(Collectors.toList()));
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().getAddress()).build(),
+            transportService, discoveredNodes::get, () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+
+        discoveredNodes.set(emptyList());
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+    }
+
+    public void testCancelsBootstrapIfNodeMatchesMultipleRequirements() {
+        AtomicReference<List<DiscoveryNode>> discoveredNodes
+            = new AtomicReference<>(Stream.of(otherNode1, otherNode2).collect(Collectors.toList()));
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), otherNode1.getAddress().toString(), otherNode1.getName())
+                .build(),
+            transportService, discoveredNodes::get, () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+
+        discoveredNodes.set(Stream.of(new DiscoveryNode(otherNode1.getName(), randomAlphaOfLength(10), buildNewFakeTransportAddress(),
+            emptyMap(), singleton(Role.MASTER), Version.CURRENT),
+            new DiscoveryNode("yet-another-node", randomAlphaOfLength(10), otherNode1.getAddress(), emptyMap(), singleton(Role.MASTER),
+                Version.CURRENT)).collect(Collectors.toList()));
+
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+    }
+
+    public void testMatchesOnNodeName() {
+        final AtomicBoolean bootstrapped = new AtomicBoolean();
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName()).build(), transportService,
+            Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true)));
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertTrue(bootstrapped.get());
+    }
+
+    public void testMatchesOnNodeAddress() {
+        final AtomicBoolean bootstrapped = new AtomicBoolean();
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().toString()).build(), transportService,
+            Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true)));
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertTrue(bootstrapped.get());
+    }
+
+    public void testMatchesOnNodeHostAddress() {
+        final AtomicBoolean bootstrapped = new AtomicBoolean();
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().getAddress()).build(),
+            transportService, Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true)));
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+        assertTrue(bootstrapped.get());
+    }
+
+    public void testDoesNotJustMatchEverything() {
+        ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(
+            Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), randomAlphaOfLength(10)).build(), transportService,
+            Collections::emptyList, () -> false, vc -> {
+            throw new AssertionError("should not be called");
+        });
+
+        transportService.start();
+        clusterBootstrapService.onFoundPeersUpdated();
+        deterministicTaskQueue.runAllTasks();
+    }
+
+    public void testDoesNotIncludeExtraNodes() {
+        final DiscoveryNode extraNode =
newDiscoveryNode("extra-node"); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2, extraNode).collect(Collectors.toList()), () -> false, + vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), not(hasItem(extraNode.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); - // termination means success + assertTrue(bootstrapped.get()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 129b29e1f21e5..cf8e1737a7708 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -38,6 +38,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singletonList; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.equalTo; @@ -245,6 +246,13 @@ public void testDescriptionAfterBootstrapping() { "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", BOOTSTRAP_PLACEHOLDER_PREFIX + "n3"), + emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4"), emptyList(), emptyList(), 0L) .getDescription(), is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + @@ -259,6 +267,20 @@ public void testDescriptionAfterBootstrapping() { "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), + emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", + BOOTSTRAP_PLACEHOLDER_PREFIX + "n4", 
BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[]{"n1"}, new String[]{"n1"}), emptyList(), emptyList(), 0L).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1], " + diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 96746f3343e0a..a9ca7d917b9d8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -26,7 +26,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -48,7 +47,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -82,15 +80,16 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptySet; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.clusterState; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.setValue; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.value; @@ -125,7 +124,6 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -726,70 +724,6 @@ public void testAckListenerReceivesNacksFromFollowerInHigherTerm() { // assertTrue("expected ack from " + follower1, ackCollector.hasAckedSuccessfully(follower1)); } - public void testDiscoveryOfPeersTriggersNotification() { - final Cluster cluster = new Cluster(randomIntBetween(2, 5)); - - // register a listener and then deregister it again to show that it is not called after deregistration - try (Releasable ignored = cluster.getAnyNode().coordinator.withDiscoveryListener(ActionListener.wrap(() -> { - throw new AssertionError("should not 
be called"); - }))) { - // do nothing - } - - final long startTimeMillis = cluster.deterministicTaskQueue.getCurrentTimeMillis(); - final ClusterNode bootstrapNode = cluster.getAnyNode(); - final AtomicBoolean hasDiscoveredAllPeers = new AtomicBoolean(); - assertFalse(bootstrapNode.coordinator.getFoundPeers().iterator().hasNext()); - try (Releasable ignored = bootstrapNode.coordinator.withDiscoveryListener( - new ActionListener>() { - @Override - public void onResponse(Iterable discoveryNodes) { - int peerCount = 0; - for (final DiscoveryNode discoveryNode : discoveryNodes) { - peerCount++; - } - assertThat(peerCount, lessThan(cluster.size())); - if (peerCount == cluster.size() - 1 && hasDiscoveredAllPeers.get() == false) { - hasDiscoveredAllPeers.set(true); - final long elapsedTimeMillis = cluster.deterministicTaskQueue.getCurrentTimeMillis() - startTimeMillis; - logger.info("--> {} discovered {} peers in {}ms", bootstrapNode.getId(), cluster.size() - 1, elapsedTimeMillis); - assertThat(elapsedTimeMillis, lessThanOrEqualTo(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2)); - } - } - - @Override - public void onFailure(Exception e) { - throw new AssertionError("unexpected", e); - } - })) { - cluster.runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + randomLongBetween(0, 60000), "discovery phase"); - } - - assertTrue(hasDiscoveredAllPeers.get()); - - final AtomicBoolean receivedAlreadyBootstrappedException = new AtomicBoolean(); - try (Releasable ignored = bootstrapNode.coordinator.withDiscoveryListener( - new ActionListener>() { - @Override - public void onResponse(Iterable discoveryNodes) { - // ignore - } - - @Override - public void onFailure(Exception e) { - if (e instanceof ClusterAlreadyBootstrappedException) { - receivedAlreadyBootstrappedException.set(true); - } else { - throw new AssertionError("unexpected", e); - } - } - })) { - - cluster.stabilise(); - } - assertTrue(receivedAlreadyBootstrappedException.get()); - } - public void testSettingInitialConfigurationTriggersElection() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + randomLongBetween(0, 60000), "initial discovery phase"); @@ -1271,12 +1205,8 @@ public String toString() { } } else if (rarely()) { final ClusterNode clusterNode = getAnyNode(); - clusterNode.onNode( - () -> { - logger.debug("----> [runRandomly {}] applying initial configuration {} to {}", - thisStep, initialConfiguration, clusterNode.getId()); - clusterNode.coordinator.setInitialConfiguration(initialConfiguration); - }).run(); + logger.debug("----> [runRandomly {}] applying initial configuration on {}", step, clusterNode.getId()); + clusterNode.applyInitialConfiguration(); } else { if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { deterministicTaskQueue.advanceTime(); @@ -1803,11 +1733,18 @@ ClusterState getLastAppliedClusterState() { void applyInitialConfiguration() { onNode(() -> { + final Set nodeIdsWithPlaceholders = new HashSet<>(initialConfiguration.getNodeIds()); + Stream.generate(() -> BOOTSTRAP_PLACEHOLDER_PREFIX + UUIDs.randomBase64UUID(random())) + .limit((Math.max(initialConfiguration.getNodeIds().size(), 2) - 1) / 2) + .forEach(nodeIdsWithPlaceholders::add); + final VotingConfiguration configurationWithPlaceholders = new VotingConfiguration(new HashSet<>( + randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders))); try { - coordinator.setInitialConfiguration(initialConfiguration); - 
logger.info("successfully set initial configuration to {}", initialConfiguration); + coordinator.setInitialConfiguration(configurationWithPlaceholders); + logger.info("successfully set initial configuration to {}", configurationWithPlaceholders); } catch (CoordinationStateRejectedException e) { - logger.info(new ParameterizedMessage("failed to set initial configuration to {}", initialConfiguration), e); + logger.info(new ParameterizedMessage("failed to set initial configuration to {}", + configurationWithPlaceholders), e); } }).run(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java index 6d985d46ef6e4..4f1016847c887 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java @@ -31,7 +31,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.MockTransport; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; @@ -41,12 +43,21 @@ import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_ACTION_NAME; @@ -62,6 +73,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Mockito.mock; public class FollowersCheckerTests extends ESTestCase { @@ -536,6 +548,46 @@ public String executor() { } } + private void testPreferMasterNodes() { + List nodes = randomNodes(10); + DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); + nodes.forEach(dn -> discoNodesBuilder.add(dn)); + DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build(); + CapturingTransport capturingTransport = new CapturingTransport(); + TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, mock(ThreadPool.class), + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, emptySet()); + final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, transportService, fcr -> { + assert false : fcr; + }, (node, reason) -> { + assert false : node; + }); + followersChecker.setCurrentNodes(discoveryNodes); + List followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear()) + .map(cr -> cr.node).collect(Collectors.toList()); + List sortedFollowerTargets = new 
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java
index 6d985d46ef6e4..4f1016847c887 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java
@@ -31,7 +31,9 @@
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.EqualsHashCodeTestUtils;
 import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction;
+import org.elasticsearch.test.transport.CapturingTransport;
 import org.elasticsearch.test.transport.MockTransport;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.ThreadPool.Names;
 import org.elasticsearch.transport.ConnectTransportException;
 import org.elasticsearch.transport.TransportException;
@@ -41,12 +43,21 @@
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;

+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import static java.util.Collections.emptySet;
 import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_ACTION_NAME;
@@ -62,6 +73,7 @@
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.core.IsInstanceOf.instanceOf;
+import static org.mockito.Mockito.mock;

 public class FollowersCheckerTests extends ESTestCase {

@@ -536,6 +548,46 @@ public String executor() {
         }
     }

+    public void testPreferMasterNodes() {
+        List<DiscoveryNode> nodes = randomNodes(10);
+        DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder();
+        nodes.forEach(dn -> discoNodesBuilder.add(dn));
+        DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build();
+        CapturingTransport capturingTransport = new CapturingTransport();
+        TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, mock(ThreadPool.class),
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, emptySet());
+        final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, transportService, fcr -> {
+            assert false : fcr;
+        }, (node, reason) -> {
+            assert false : node;
+        });
+        followersChecker.setCurrentNodes(discoveryNodes);
+        List<DiscoveryNode> followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear())
+            .map(cr -> cr.node).collect(Collectors.toList());
+        List<DiscoveryNode> sortedFollowerTargets = new ArrayList<>(followerTargets);
+        Collections.sort(sortedFollowerTargets, Comparator.comparing(n -> n.isMasterNode() == false));
+        assertEquals(sortedFollowerTargets, followerTargets);
+    }
+
+    private static List<DiscoveryNode> randomNodes(final int numNodes) {
+        List<DiscoveryNode> nodesList = new ArrayList<>();
+        for (int i = 0; i < numNodes; i++) {
+            Map<String, String> attributes = new HashMap<>();
+            if (frequently()) {
+                attributes.put("custom", randomBoolean() ? "match" : randomAlphaOfLengthBetween(3, 5));
+            }
+            final DiscoveryNode node = newNode(i, attributes,
+                new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))));
+            nodesList.add(node);
+        }
+        return nodesList;
+    }
+
+    private static DiscoveryNode newNode(int nodeId, Map<String, String> attributes, Set<DiscoveryNode.Role> roles) {
+        return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles,
+            Version.CURRENT);
+    }
+
     private static class ExpectsSuccess implements TransportResponseHandler<Empty> {

         private final AtomicBoolean responseReceived = new AtomicBoolean();
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java
index 4435073d95363..658250bc7a4da 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster.coordination;

+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration;
@@ -33,10 +34,13 @@
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;

+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -97,8 +101,8 @@ abstract class MockPublication extends Publication {

         boolean committed;

-        Map<DiscoveryNode, ActionListener<PublishWithJoinResponse>> pendingPublications = new HashMap<>();
-        Map<DiscoveryNode, ActionListener<TransportResponse.Empty>> pendingCommits = new HashMap<>();
+        Map<DiscoveryNode, ActionListener<PublishWithJoinResponse>> pendingPublications = new LinkedHashMap<>();
+        Map<DiscoveryNode, ActionListener<TransportResponse.Empty>> pendingCommits = new LinkedHashMap<>();
         Map<DiscoveryNode, Join> joins = new HashMap<>();
         Set<DiscoveryNode> missingJoins = new HashSet<>();
@@ -346,7 +350,7 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter
         publication.pendingPublications.entrySet().stream().collect(shuffle()).forEach(e -> {
             if (e.getKey().equals(n2)) {
                 if (timeOut) {
-                    publication.onTimeout();
+                    publication.cancel("timed out");
                 } else {
                     e.getValue().onFailure(new TransportException(new Exception("dummy failure")));
                 }
@@ -372,6 +376,22 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter
             tuple.v1().equals(n2) ? "dummy failure" : "non-failed nodes do not form a quorum")));
     }

+    public void testPublishingToMastersFirst() {
+        VotingConfiguration singleNodeConfig = new VotingConfiguration(Sets.newHashSet(n1.getId()));
+        initializeCluster(singleNodeConfig);
+
+        DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder();
+        randomNodes(10).forEach(dn -> discoNodesBuilder.add(dn));
+        DiscoveryNodes discoveryNodes = discoNodesBuilder.add(n1).localNodeId(n1.getId()).build();
+        MockPublication publication = node1.publish(CoordinationStateTests.clusterState(1L, 2L,
+            discoveryNodes, singleNodeConfig, singleNodeConfig, 42L), null, Collections.emptySet());
+
+        List<DiscoveryNode> publicationTargets = new ArrayList<>(publication.pendingPublications.keySet());
+        List<DiscoveryNode> sortedPublicationTargets = new ArrayList<>(publicationTargets);
+        Collections.sort(sortedPublicationTargets, Comparator.comparing(n -> n.isMasterNode() == false));
+        assertEquals(sortedPublicationTargets, publicationTargets);
+    }
+
     public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedException {
         VotingConfiguration config = new VotingConfiguration(randomBoolean() ?
             Sets.newHashSet(n1.getId(), n2.getId()) : Sets.newHashSet(n1.getId(), n2.getId(), n3.getId()));
@@ -407,7 +427,7 @@ public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedEx
             }
         });

-        publication.onTimeout();
+        publication.cancel("timed out");
         assertTrue(publication.completed);
         assertTrue(publication.committed);
         assertEquals(committingNodes, ackListener.await(0L, TimeUnit.SECONDS));
@@ -428,6 +448,25 @@
         assertEquals(discoNodes, ackListener.await(0L, TimeUnit.SECONDS));
     }

+    private static List<DiscoveryNode> randomNodes(final int numNodes) {
+        List<DiscoveryNode> nodesList = new ArrayList<>();
+        for (int i = 0; i < numNodes; i++) {
+            Map<String, String> attributes = new HashMap<>();
+            if (frequently()) {
+                attributes.put("custom", randomBoolean() ? "match" : randomAlphaOfLengthBetween(3, 5));
+            }
+            final DiscoveryNode node = newNode(i, attributes,
+                new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))));
+            nodesList.add(node);
+        }
+        return nodesList;
+    }
+
+    private static DiscoveryNode newNode(int nodeId, Map<String, String> attributes, Set<DiscoveryNode.Role> roles) {
+        return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles,
+            Version.CURRENT);
+    }
+
     public static <T> Collector<T, ?, Stream<T>> shuffle() {
         return Collectors.collectingAndThen(Collectors.toList(),
             ts -> {
diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java
index cf0b1eebe983a..5c92029520e8d 100644
--- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java
@@ -27,6 +27,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -149,6 +150,18 @@ public void testResolveNodesIds() {
         assertThat(resolvedNodesIds, equalTo(expectedNodesIds));
     }

+    public void testMastersFirst() {
+        final List<DiscoveryNode> inputNodes = randomNodes(10);
+        final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+        inputNodes.forEach(discoBuilder::add);
+        final List<DiscoveryNode> returnedNodes = discoBuilder.build().mastersFirstStream().collect(Collectors.toList());
+        assertEquals(returnedNodes.size(), inputNodes.size());
+        assertEquals(new HashSet<>(returnedNodes), new HashSet<>(inputNodes));
+        final List<DiscoveryNode> sortedNodes = new ArrayList<>(returnedNodes);
+        Collections.sort(sortedNodes, Comparator.comparing(n -> n.isMasterNode() == false));
+        assertEquals(sortedNodes, returnedNodes);
+    }
+
     public void testDeltas() {
         Set<DiscoveryNode> nodesA = new HashSet<>();
         nodesA.addAll(randomNodes(1 + randomInt(10)));
diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java
index a2858284593d1..feb406c61c966 100644
--- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java
+++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java
@@ -24,7 +24,9 @@
 import java.time.Instant;
 import java.time.ZoneId;
 import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
 import java.time.format.DateTimeParseException;
+import java.time.temporal.ChronoField;
 import java.time.temporal.TemporalAccessor;
 import java.util.Locale;
@@ -157,4 +159,56 @@ public void testParsingStrictNanoDates() {
         formatter.format(formatter.parse("2018-05-15T17:14:56.123456789+0100"));
         formatter.format(formatter.parse("2018-05-15T17:14:56.123456789+01:00"));
     }
+
+    public void testRoundupFormatterWithEpochDates() {
+        assertRoundupFormatter("8epoch_millis", "1234567890", 1234567890L);
+        // also check nanos of the epoch_millis formatter if it is rounded up to the nano second
+        DateTimeFormatter roundUpFormatter = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_millis")).getRoundupParser();
+        Instant epochMilliInstant = DateFormatters.toZonedDateTime(roundUpFormatter.parse("1234567890")).toInstant();
+        assertThat(epochMilliInstant.getLong(ChronoField.NANO_OF_SECOND), is(890_999_999L));
+
+        assertRoundupFormatter("8strict_date_optional_time||epoch_millis", "2018-10-10T12:13:14.123Z", 1539173594123L);
+
assertRoundupFormatter("8strict_date_optional_time||epoch_millis", "1234567890", 1234567890L); + assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "2018-10-10T12:13:14.123", 1539173594123L); + assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "1234567890", 1234567890L); + + assertRoundupFormatter("8epoch_second", "1234567890", 1234567890999L); + // also check nanos of the epoch_millis formatter if it is rounded up to the nano second + DateTimeFormatter epochSecondRoundupParser = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_second")).getRoundupParser(); + Instant epochSecondInstant = DateFormatters.toZonedDateTime(epochSecondRoundupParser.parse("1234567890")).toInstant(); + assertThat(epochSecondInstant.getLong(ChronoField.NANO_OF_SECOND), is(999_999_999L)); + + assertRoundupFormatter("8strict_date_optional_time||epoch_second", "2018-10-10T12:13:14.123Z", 1539173594123L); + assertRoundupFormatter("8strict_date_optional_time||epoch_second", "1234567890", 1234567890999L); + assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "2018-10-10T12:13:14.123", 1539173594123L); + assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "1234567890", 1234567890999L); + } + + private void assertRoundupFormatter(String format, String input, long expectedMilliSeconds) { + JavaDateFormatter dateFormatter = (JavaDateFormatter) DateFormatter.forPattern(format); + dateFormatter.parse(input); + DateTimeFormatter roundUpFormatter = dateFormatter.getRoundupParser(); + long millis = DateFormatters.toZonedDateTime(roundUpFormatter.parse(input)).toInstant().toEpochMilli(); + assertThat(millis, is(expectedMilliSeconds)); + } + + public void testRoundupFormatterZone() { + ZoneId zoneId = randomZone(); + String format = randomFrom("8epoch_second", "8epoch_millis", "8strict_date_optional_time", "8uuuu-MM-dd'T'HH:mm:ss.SSS", + "8strict_date_optional_time||date_optional_time"); + JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withZone(zoneId); + DateTimeFormatter roundUpFormatter = formatter.getRoundupParser(); + assertThat(roundUpFormatter.getZone(), is(zoneId)); + assertThat(formatter.zone(), is(zoneId)); + } + + public void testRoundupFormatterLocale() { + Locale locale = randomLocale(random()); + String format = randomFrom("8epoch_second", "8epoch_millis", "8strict_date_optional_time", "8uuuu-MM-dd'T'HH:mm:ss.SSS", + "8strict_date_optional_time||date_optional_time"); + JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withLocale(locale); + DateTimeFormatter roundupParser = formatter.getRoundupParser(); + assertThat(roundupParser.getLocale(), is(locale)); + assertThat(formatter.locale(), is(locale)); + } } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 6f47590089f28..efe69ff99e20b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -95,6 +95,7 @@ public void testIsolatedUnicastNodes() throws Exception { * The temporal unicast responses is empty. When partition is solved the one ping response contains a master node. * The rejoining node should take this master node and connect. 
 */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37687")
     public void testUnicastSinglePingResponseContainsMaster() throws Exception {
         internalCluster().setHostsListContainsOnlyFirstNode(true);
         List<String> nodes = startCluster(4);
@@ -131,6 +132,7 @@ public void testUnicastSinglePingResponseContainsMaster() throws Exception {
     /**
      * Test cluster join with issues in cluster state publishing
      */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37685")
     public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
         String masterNode = internalCluster().startMasterOnlyNode();
         String nonMasterNode = internalCluster().startDataOnlyNode();
@@ -200,6 +202,7 @@ public void testClusterFormingWithASlowNode() throws Exception {
         ensureStableCluster(3);
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37539")
     public void testElectMasterWithLatestVersion() throws Exception {
         final Set<String> nodes = new HashSet<>(internalCluster().startNodes(3));
         ensureStableCluster(3);
diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
index 9051f302a591f..e237415a9c60e 100644
--- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java
@@ -29,6 +29,7 @@
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.discovery.zen.UnicastHostsProvider;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
@@ -53,6 +54,7 @@
 import java.util.function.Supplier;

 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 public class DiscoveryModuleTests extends ESTestCase {

@@ -87,11 +89,12 @@ default Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool

     @Before
     public void setupDummyServices() {
-        transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, null, null);
+        threadPool = mock(ThreadPool.class);
+        when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
+        transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null);
         masterService = mock(MasterService.class);
         namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
         clusterApplier = mock(ClusterApplier.class);
-        threadPool = mock(ThreadPool.class);
         clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         gatewayMetaState = mock(GatewayMetaState.class);
     }
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java
index cc9295cee2e3f..b04a1cebbf3e3 100644
--- a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java
@@ -62,6 +62,7 @@ public void setUp() throws Exception {
         super.setUp();
         threadPool = new TestThreadPool(FileBasedUnicastHostsProviderTests.class.getName());
         executorService = Executors.newSingleThreadExecutor();
+        createTransportSvc();
     }

     @After
@@ -77,8 +78,7 @@ public void tearDown() throws
Exception { } } - @Before - public void createTransportSvc() { + private void createTransportSvc() { final MockNioTransport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java new file mode 100644 index 0000000000000..bdca86858701f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class NodeEnvironmentIT extends ESIntegTestCase { + public void testStartFailureOnDataForNonDataNode() throws Exception { + final String indexName = "test-fail-on-data"; + + logger.info("--> starting one node"); + internalCluster().startNode(); + + logger.info("--> creating index"); + prepareCreate(indexName, Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ).get(); + final String indexUUID = resolveIndex(indexName).getUUID(); + + logger.info("--> indexing a simple document"); + client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); + + logger.info("--> restarting the node with node.data=true"); + internalCluster().restartRandomDataNode(); + + logger.info("--> restarting the node with node.data=false"); + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Node started with node.data=false and existing shard data must fail", + () -> + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build(); + } + })); + assertThat(ex.getMessage(), containsString(indexUUID)); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 55ab02c1dcfb9..2771bc9f243ac 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ 
-31,6 +31,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -52,6 +53,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to allow extras public class NodeEnvironmentTests extends ESTestCase { @@ -470,6 +472,47 @@ public void testExistingTempFiles() throws IOException { } } + public void testEnsureNoShardData() throws IOException { + Settings settings = buildEnvSettings(Settings.EMPTY); + Index index = new Index("test", "testUUID"); + + try (NodeEnvironment env = newNodeEnvironment(settings)) { + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + } + } + + // build settings using same path.data as original but with node.data=false + Settings noDataSettings = Settings.builder() + .put(settings) + .put(Node.NODE_DATA_SETTING.getKey(), false).build(); + + String shardDataDirName = Integer.toString(randomInt(10)); + Path shardPath; + + // test that we can create data=false env with only meta information + try (NodeEnvironment env = newNodeEnvironment(noDataSettings)) { + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve(shardDataDirName)); + } + shardPath = env.indexPaths(index)[0]; + } + + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Must fail creating NodeEnvironment on a data path that has shard data if node.data=false", + () -> newNodeEnvironment(noDataSettings).close()); + + assertThat(ex.getMessage(), + containsString(shardPath.resolve(shardDataDirName).toAbsolutePath().toString())); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + + // test that we can create data=true env + newNodeEnvironment(settings).close(); + } + /** Converts an array of Strings to an array of Paths, adding an additional child if specified */ private Path[] stringsToPaths(String[] strings, String additional) { Path[] locations = new Path[strings.length]; diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index e5cdcc1ac7320..b345afe9b8f89 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -70,7 +70,7 @@ public void testReadOnlyEngine() throws Exception { lastDocIds = getDocIds(engine, true); assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); - assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { String delId = Integer.toString(i); @@ -126,7 +126,7 @@ public void testFlushes() throws IOException { if (rarely()) { engine.flush(); } - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + 
globalCheckpoint.set(i); } engine.syncTranslog(); engine.flushAndClose(); @@ -139,6 +139,40 @@ public void testFlushes() throws IOException { } } + public void testEnsureMaxSeqNoIsEqualToGlobalCheckpoint() throws IOException { + IOUtils.close(engine, store); + Engine readOnlyEngine = null; + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final int numDocs = scaledRandomIntBetween(10, 100); + try (InternalEngine engine = createEngine(config)) { + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + maxSeqNo = engine.getLocalCheckpoint(); + } + globalCheckpoint.set(engine.getLocalCheckpoint() - 1); + engine.syncTranslog(); + engine.flushAndClose(); + + IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> new ReadOnlyEngine(engine.engineConfig, null, null, true, Function.identity()) { + @Override + protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) { + // we don't want the assertion to trip in this test + } + }); + assertThat(exception.getMessage(), equalTo("Maximum sequence number [" + maxSeqNo + + "] from last commit does not match global checkpoint [" + globalCheckpoint.get() + "]")); + } finally { + IOUtils.close(readOnlyEngine); + } + } + } + public void testReadOnly() throws IOException { IOUtils.close(engine, store); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 5fca37a71886d..f67ce8de1422a 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -198,13 +199,14 @@ public IndexResult index(Index op) throws IOException { Future fut = shards.asyncRecoverReplica(replica, (shard, node) -> new RecoveryTarget(shard, node, recoveryListener, v -> {}){ @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, + ActionListener listener) { try { indexedOnPrimary.await(); } catch (InterruptedException e) { throw new AssertionError(e); } - super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps); + super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); } }); fut.get(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java 
b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0d58239fd4a56..1ea90ec6e8fbc 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2573,8 +2573,8 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { }) { // we're only checking that listeners are called when the engine is open, before there is no point @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) throws IOException { - super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps); + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { + super.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, listener); assertListenerCalled.accept(replica); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 0cecc925b2488..cd495b7e17bee 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -491,9 +491,9 @@ public SendFileResult phase1(final IndexCommit snapshot, final Supplier } @Override - TimeValue prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException { + void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { prepareTargetForTranslogCalled.set(true); - return super.prepareTargetForTranslog(fileBasedRecovery, totalTranslogOps); + super.prepareTargetForTranslog(fileBasedRecovery, totalTranslogOps, listener); } @Override @@ -700,7 +700,7 @@ private List generateFiles(Store store, int numFiles, IntSupp class TestRecoveryTargetHandler implements RecoveryTargetHandler { @Override - public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps) { + public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { } @Override diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java index 2a180cc12dd19..c25ca3cb7db63 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java @@ -21,10 +21,14 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -33,9 +37,11 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; 
import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -65,6 +71,8 @@ import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializationTestCase { @@ -307,6 +315,91 @@ public void testFeatureSerialization() throws IOException { assertThat(read.taskMap().keySet(), equalTo(Collections.singleton("test_compatible"))); } + public void testDisassociateDeadNodes_givenNoPersistentTasks() { + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")).build(); + ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState); + assertThat(originalState, sameInstance(returnedState)); + } + + public void testDisassociateDeadNodes_givenAssignedPersistentTask() { + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("node1") + .masterNodeId("node1") + .build(); + + String taskName = "test/task"; + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask("task-id", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("node1", "test assignment")); + + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) + .nodes(nodes) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); + ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState); + assertThat(originalState, sameInstance(returnedState)); + + PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState); + PersistentTasksCustomMetaData returnedTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(returnedState); + assertEquals(originalTasks, returnedTasks); + } + + public void testDisassociateDeadNodes() { + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("node1") + .masterNodeId("node1") + .build(); + + String taskName = "test/task"; + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask("assigned-task", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("node1", "test assignment")) + .addTask("task-on-deceased-node", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("left-the-cluster", "test assignment")); + + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) + .nodes(nodes) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); + ClusterState 
returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState); + assertThat(originalState, not(sameInstance(returnedState))); + + PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState); + PersistentTasksCustomMetaData returnedTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(returnedState); + assertNotEquals(originalTasks, returnedTasks); + + assertEquals(originalTasks.getTask("assigned-task"), returnedTasks.getTask("assigned-task")); + assertNotEquals(originalTasks.getTask("task-on-deceased-node"), returnedTasks.getTask("task-on-deceased-node")); + assertEquals(PersistentTasksCustomMetaData.LOST_NODE_ASSIGNMENT, returnedTasks.getTask("task-on-deceased-node").getAssignment()); + } + + private PersistentTaskParams emptyTaskParams(String taskName) { + return new PersistentTaskParams() { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) { + return builder; + } + + @Override + public void writeTo(StreamOutput out) { + + } + + @Override + public String getWriteableName() { + return taskName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + }; + } + private Assignment randomAssignment() { if (randomBoolean()) { if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java index 1531744a13cac..24f97b67c1458 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java @@ -24,11 +24,13 @@ import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; @@ -45,13 +47,14 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.coordination.CoordinationState; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.CoordinatorTests; import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; import org.elasticsearch.cluster.coordination.InMemoryPersistedState; import 
org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -196,6 +199,53 @@ public void testSuccessfulSnapshot() { assertEquals(0, snapshotInfo.failedShards()); } + public void testConcurrentSnapshotCreateAndDelete() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + final AtomicBoolean createdSnapshot = new AtomicBoolean(); + masterNode.client.admin().cluster().preparePutRepository(repoName) + .setType(FsRepository.TYPE).setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) + .execute( + assertNoFailureListener( + () -> masterNode.client.admin().indices().create( + new CreateIndexRequest(index).waitForActiveShards(ActiveShardCount.ALL).settings( + Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)), + assertNoFailureListener( + () -> masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .execute(assertNoFailureListener( + () -> masterNode.client.admin().cluster().deleteSnapshot( + new DeleteSnapshotRequest(repoName, snapshotName), + assertNoFailureListener(() -> masterNode.client.admin().cluster() + .prepareCreateSnapshot(repoName, snapshotName).execute( + assertNoFailureListener(() -> createdSnapshot.set(true)) + ))))))))); + + deterministicTaskQueue.runAllRunnableTasks(); + + assertTrue(createdSnapshot.get()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + private void startCluster() { final ClusterState initialClusterState = new ClusterState.Builder(ClusterName.DEFAULT).nodes(testClusterNodes.randomDiscoveryNodes()).build(); @@ -204,14 +254,10 @@ private void startCluster() { deterministicTaskQueue.advanceTime(); deterministicTaskQueue.runAllRunnableTasks(); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration( - testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) - .map(node -> new BootstrapConfiguration.NodeDescription(node.node)) - .distinct() - .collect(Collectors.toList())); + final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node) + .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet())); 
testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach( - testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(bootstrapConfiguration) - ); + testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); runUntil( () -> { @@ -519,6 +565,11 @@ allocationService, new AliasValidator(), environment, indexScopedSettings, transportService, clusterService, threadPool, snapshotsService, actionFilters, indexNameExpressionResolver )); + actions.put(DeleteSnapshotAction.INSTANCE, + new TransportDeleteSnapshotAction( + transportService, clusterService, threadPool, + snapshotsService, actionFilters, indexNameExpressionResolver + )); client.initialize(actions, () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); } diff --git a/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java b/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java index 58dc2a1e55b47..aeb92dac73479 100644 --- a/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java +++ b/server/src/test/java/org/elasticsearch/transport/CompressibleBytesOutputStreamTests.java @@ -39,6 +39,8 @@ public void testStreamWithoutCompression() throws IOException { stream.write(expectedBytes); BytesReference bytesRef = stream.materializeBytes(); + // Closing compression stream does not close underlying stream + stream.close(); assertFalse(CompressorFactory.COMPRESSOR.isCompressed(bytesRef)); @@ -48,7 +50,8 @@ public void testStreamWithoutCompression() throws IOException { assertEquals(-1, streamInput.read()); assertArrayEquals(expectedBytes, actualBytes); - stream.close(); + + bStream.close(); // The bytes should be zeroed out on close for (byte b : bytesRef.toBytesRef().bytes) { @@ -64,6 +67,7 @@ public void testStreamWithCompression() throws IOException { stream.write(expectedBytes); BytesReference bytesRef = stream.materializeBytes(); + stream.close(); assertTrue(CompressorFactory.COMPRESSOR.isCompressed(bytesRef)); @@ -73,7 +77,8 @@ public void testStreamWithCompression() throws IOException { assertEquals(-1, streamInput.read()); assertArrayEquals(expectedBytes, actualBytes); - stream.close(); + + bStream.close(); // The bytes should be zeroed out on close for (byte b : bytesRef.toBytesRef().bytes) { diff --git a/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java b/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java new file mode 100644 index 0000000000000..499b6586543ed --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/InboundMessageTests.java @@ -0,0 +1,228 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; + +public class InboundMessageTests extends ESTestCase { + + private final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + private final NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); + + public void testReadRequest() throws IOException { + String[] features = {"feature1", "feature2"}; + String value = randomAlphaOfLength(10); + Message message = new Message(value); + String action = randomAlphaOfLength(10); + long requestId = randomLong(); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + threadContext.putHeader("header", "header_value"); + Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); + OutboundMessage.Request request = new OutboundMessage.Request(threadContext, features, message, version, action, requestId, + isHandshake, compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + // Check that the thread context is not deleted. + assertEquals("header_value", threadContext.getHeader("header")); + + threadContext.stashContext(); + threadContext.putHeader("header", "header_value2"); + + InboundMessage.Reader reader = new InboundMessage.Reader(version, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.RequestMessage inboundMessage = (InboundMessage.RequestMessage) reader.deserialize(sliced); + // Check that deserialize does not overwrite current thread context. 
+ assertEquals("header_value2", threadContext.getHeader("header")); + inboundMessage.getStoredContext().restore(); + assertEquals("header_value", threadContext.getHeader("header")); + assertEquals(isHandshake, inboundMessage.isHandshake()); + assertEquals(compress, inboundMessage.isCompress()); + assertEquals(version, inboundMessage.getVersion()); + assertEquals(action, inboundMessage.getActionName()); + assertEquals(new HashSet<>(Arrays.asList(features)), inboundMessage.getFeatures()); + assertTrue(inboundMessage.isRequest()); + assertFalse(inboundMessage.isResponse()); + assertFalse(inboundMessage.isError()); + assertEquals(value, new Message(inboundMessage.getStreamInput()).value); + } + + public void testReadResponse() throws IOException { + HashSet features = new HashSet<>(Arrays.asList("feature1", "feature2")); + String value = randomAlphaOfLength(10); + Message message = new Message(value); + long requestId = randomLong(); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + threadContext.putHeader("header", "header_value"); + Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); + OutboundMessage.Response request = new OutboundMessage.Response(threadContext, features, message, version, requestId, isHandshake, + compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + // Check that the thread context is not deleted. + assertEquals("header_value", threadContext.getHeader("header")); + + threadContext.stashContext(); + threadContext.putHeader("header", "header_value2"); + + InboundMessage.Reader reader = new InboundMessage.Reader(version, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.ResponseMessage inboundMessage = (InboundMessage.ResponseMessage) reader.deserialize(sliced); + // Check that deserialize does not overwrite current thread context. + assertEquals("header_value2", threadContext.getHeader("header")); + inboundMessage.getStoredContext().restore(); + assertEquals("header_value", threadContext.getHeader("header")); + assertEquals(isHandshake, inboundMessage.isHandshake()); + assertEquals(compress, inboundMessage.isCompress()); + assertEquals(version, inboundMessage.getVersion()); + assertTrue(inboundMessage.isResponse()); + assertFalse(inboundMessage.isRequest()); + assertFalse(inboundMessage.isError()); + assertEquals(value, new Message(inboundMessage.getStreamInput()).value); + } + + public void testReadErrorResponse() throws IOException { + HashSet features = new HashSet<>(Arrays.asList("feature1", "feature2")); + RemoteTransportException exception = new RemoteTransportException("error", new IOException()); + long requestId = randomLong(); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + threadContext.putHeader("header", "header_value"); + Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); + OutboundMessage.Response request = new OutboundMessage.Response(threadContext, features, exception, version, requestId, + isHandshake, compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + // Check that the thread context is not deleted. 
+ assertEquals("header_value", threadContext.getHeader("header")); + + threadContext.stashContext(); + threadContext.putHeader("header", "header_value2"); + + InboundMessage.Reader reader = new InboundMessage.Reader(version, registry, threadContext); + BytesReference sliced = reference.slice(6, reference.length() - 6); + InboundMessage.ResponseMessage inboundMessage = (InboundMessage.ResponseMessage) reader.deserialize(sliced); + // Check that deserialize does not overwrite current thread context. + assertEquals("header_value2", threadContext.getHeader("header")); + inboundMessage.getStoredContext().restore(); + assertEquals("header_value", threadContext.getHeader("header")); + assertEquals(isHandshake, inboundMessage.isHandshake()); + assertEquals(compress, inboundMessage.isCompress()); + assertEquals(version, inboundMessage.getVersion()); + assertTrue(inboundMessage.isResponse()); + assertFalse(inboundMessage.isRequest()); + assertTrue(inboundMessage.isError()); + assertEquals("[error]", inboundMessage.getStreamInput().readException().getMessage()); + } + + public void testEnsureVersionCompatibility() throws IOException { + testVersionIncompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), + Version.CURRENT), Version.CURRENT, randomBoolean()); + + final Version version = Version.fromString("7.0.0"); + testVersionIncompatibility(Version.fromString("6.0.0"), version, true); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("6.0.0"), version, false)); + assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + + // For handshake we are compatible with N-2 + testVersionIncompatibility(Version.fromString("5.6.0"), version, true); + ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("5.6.0"), version, false)); + assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("2.3.0"), version, true)); + assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + + ise = expectThrows(IllegalStateException.class, () -> + testVersionIncompatibility(Version.fromString("2.3.0"), version, false)); + assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [" + + version.minimumCompatibilityVersion() + "]", ise.getMessage()); + } + + private void testVersionIncompatibility(Version version, Version currentVersion, boolean isHandshake) throws IOException { + String[] features = {}; + String value = randomAlphaOfLength(10); + Message message = new Message(value); + String action = randomAlphaOfLength(10); + long requestId = randomLong(); + boolean compress = randomBoolean(); + OutboundMessage.Request request = new OutboundMessage.Request(threadContext, features, message, version, action, requestId, + isHandshake, compress); + BytesReference reference; + try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { + reference = request.serialize(streamOutput); + } + + BytesReference sliced = reference.slice(6, reference.length() - 6); + 
InboundMessage.Reader reader = new InboundMessage.Reader(currentVersion, registry, threadContext); + reader.deserialize(sliced); + } + + private static final class Message extends TransportMessage { + + public String value; + + private Message() { + } + + private Message(StreamInput in) throws IOException { + readFrom(in); + } + + private Message(String value) { + this.value = value; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + value = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java new file mode 100644 index 0000000000000..01e391a30a732 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -0,0 +1,184 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +public class OutboundHandlerTests extends ESTestCase { + + private final TestThreadPool threadPool = new TestThreadPool(getClass().getName()); + private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + private OutboundHandler handler; + private FakeTcpChannel fakeTcpChannel; + + @Before + public void setUp() throws Exception { + super.setUp(); + TransportLogger transportLogger = new TransportLogger(); + fakeTcpChannel = new FakeTcpChannel(randomBoolean()); + handler = new OutboundHandler(threadPool, BigArrays.NON_RECYCLING_INSTANCE, transportLogger); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + super.tearDown(); + 
} + + public void testSendRawBytes() { + BytesArray bytesArray = new BytesArray("message".getBytes(StandardCharsets.UTF_8)); + + AtomicBoolean isSuccess = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap((v) -> isSuccess.set(true), exception::set); + handler.sendBytes(fakeTcpChannel, bytesArray, listener); + + BytesReference reference = fakeTcpChannel.getMessageCaptor().get(); + ActionListener sendListener = fakeTcpChannel.getListenerCaptor().get(); + if (randomBoolean()) { + sendListener.onResponse(null); + assertTrue(isSuccess.get()); + assertNull(exception.get()); + } else { + IOException e = new IOException("failed"); + sendListener.onFailure(e); + assertFalse(isSuccess.get()); + assertSame(e, exception.get()); + } + + assertEquals(bytesArray, reference); + } + + public void testSendMessage() throws IOException { + OutboundMessage message; + ThreadContext threadContext = threadPool.getThreadContext(); + Version version = Version.CURRENT; + String actionName = "handshake"; + long requestId = randomLongBetween(0, 300); + boolean isHandshake = randomBoolean(); + boolean compress = randomBoolean(); + String value = "message"; + threadContext.putHeader("header", "header_value"); + Writeable writeable = new Message(value); + + boolean isRequest = randomBoolean(); + if (isRequest) { + message = new OutboundMessage.Request(threadContext, new String[0], writeable, version, actionName, requestId, isHandshake, + compress); + } else { + message = new OutboundMessage.Response(threadContext, new HashSet<>(), writeable, version, requestId, isHandshake, compress); + } + + AtomicBoolean isSuccess = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap((v) -> isSuccess.set(true), exception::set); + handler.sendMessage(fakeTcpChannel, message, listener); + + BytesReference reference = fakeTcpChannel.getMessageCaptor().get(); + ActionListener sendListener = fakeTcpChannel.getListenerCaptor().get(); + if (randomBoolean()) { + sendListener.onResponse(null); + assertTrue(isSuccess.get()); + assertNull(exception.get()); + } else { + IOException e = new IOException("failed"); + sendListener.onFailure(e); + assertFalse(isSuccess.get()); + assertSame(e, exception.get()); + } + + InboundMessage.Reader reader = new InboundMessage.Reader(Version.CURRENT, namedWriteableRegistry, threadPool.getThreadContext()); + try (InboundMessage inboundMessage = reader.deserialize(reference.slice(6, reference.length() - 6))) { + assertEquals(version, inboundMessage.getVersion()); + assertEquals(requestId, inboundMessage.getRequestId()); + if (isRequest) { + assertTrue(inboundMessage.isRequest()); + assertFalse(inboundMessage.isResponse()); + } else { + assertTrue(inboundMessage.isResponse()); + assertFalse(inboundMessage.isRequest()); + } + if (isHandshake) { + assertTrue(inboundMessage.isHandshake()); + } else { + assertFalse(inboundMessage.isHandshake()); + } + if (compress) { + assertTrue(inboundMessage.isCompress()); + } else { + assertFalse(inboundMessage.isCompress()); + } + Message readMessage = new Message(); + readMessage.readFrom(inboundMessage.getStreamInput()); + assertEquals(value, readMessage.value); + + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + ThreadContext.StoredContext storedContext = inboundMessage.getStoredContext(); + assertNull(threadContext.getHeader("header")); + storedContext.restore(); + assertEquals("header_value", 
threadContext.getHeader("header")); + } + } + } + + private static final class Message extends TransportMessage { + + public String value; + + private Message() { + } + + private Message(String value) { + this.value = value; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + value = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index f3294366b8fe1..a25ac2a551a22 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -158,41 +157,12 @@ public void testAddressLimit() throws Exception { assertEquals(102, addresses[2].getPort()); } - public void testEnsureVersionCompatibility() { - TcpTransport.ensureVersionCompatibility(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), - Version.CURRENT), Version.CURRENT, randomBoolean()); - - final Version version = Version.fromString("7.0.0"); - TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), version, true); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("6.0.0"), version, false)); - assertEquals("Received message from unsupported version: [6.0.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - - // For handshake we are compatible with N-2 - TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), version, true); - ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("5.6.0"), version, false)); - assertEquals("Received message from unsupported version: [5.6.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - - ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), version, true)); - assertEquals("Received handshake message from unsupported version: [2.3.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - - ise = expectThrows(IllegalStateException.class, () -> - TcpTransport.ensureVersionCompatibility(Version.fromString("2.3.0"), version, false)); - assertEquals("Received message from unsupported version: [2.3.0] minimal compatible version is: [" - + version.minimumCompatibilityVersion() + "]", ise.getMessage()); - } - @SuppressForbidden(reason = "Allow accessing localhost") public void testCompressRequestAndResponse() throws IOException { final boolean compressed = randomBoolean(); Req request = new Req(randomRealisticUnicodeOfLengthBetween(10, 100)); ThreadPool threadPool = new TestThreadPool(TcpTransportTests.class.getName()); - AtomicReference requestCaptor = new AtomicReference<>(); + AtomicReference messageCaptor = new AtomicReference<>(); try { TcpTransport transport = new TcpTransport("test", 
Settings.EMPTY, Version.CURRENT, threadPool, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), null, null) { @@ -204,7 +174,7 @@ protected FakeServerChannel bind(String name, InetSocketAddress address) throws @Override protected FakeTcpChannel initiateChannel(DiscoveryNode node) throws IOException { - return new FakeTcpChannel(false, requestCaptor); + return new FakeTcpChannel(false); } @Override @@ -219,7 +189,7 @@ public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, int numConnections = profile.getNumConnections(); ArrayList fakeChannels = new ArrayList<>(numConnections); for (int i = 0; i < numConnections; ++i) { - fakeChannels.add(new FakeTcpChannel(false, requestCaptor)); + fakeChannels.add(new FakeTcpChannel(false, messageCaptor)); } listener.onResponse(new NodeChannels(node, fakeChannels, profile, Version.CURRENT)); return () -> CloseableChannel.closeChannels(fakeChannels, false); @@ -241,12 +211,12 @@ public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, (request1, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE), ThreadPool.Names.SAME, true, true)); - BytesReference reference = requestCaptor.get(); + BytesReference reference = messageCaptor.get(); assertNotNull(reference); AtomicReference responseCaptor = new AtomicReference<>(); InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 0); - FakeTcpChannel responseChannel = new FakeTcpChannel(true, address, address, responseCaptor); + FakeTcpChannel responseChannel = new FakeTcpChannel(true, address, address, "profile", responseCaptor); transport.messageReceived(reference.slice(6, reference.length() - 6), responseChannel); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 012c574f9e6aa..55e356d093e6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1727,8 +1727,16 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) removeExclusions(excludedNodeIds); - nodeAndClient.recreateNode(newSettings, () -> rebuildUnicastHostFiles(emptyList())); - nodeAndClient.startNode(); + boolean success = false; + try { + nodeAndClient.recreateNode(newSettings, () -> rebuildUnicastHostFiles(emptyList())); + nodeAndClient.startNode(); + success = true; + } finally { + if (success == false) + nodes.remove(nodeAndClient.name); + } + if (activeDisruptionScheme != null) { activeDisruptionScheme.applyToNode(nodeAndClient.name, this); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 43542d48a6c39..87343d4c82087 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -2038,11 +2038,14 @@ public void testTcpHandshake() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override - protected String handleRequest(TcpChannel mockChannel, String profileName, StreamInput stream, long requestId, - int messageLengthBytes, Version version, InetSocketAddress 
remoteAddress, byte status) + protected void handleRequest(TcpChannel channel, InboundMessage.RequestMessage request, int messageLengthBytes) throws IOException { - return super.handleRequest(mockChannel, profileName, stream, requestId, messageLengthBytes, version, remoteAddress, - (byte) (status & ~(1 << 3))); // we flip the isHandshake bit back and act like the handler is not found + // we flip the isHandshake bit back and act like the handler is not found + byte status = (byte) (request.status & ~(1 << 3)); + Version version = request.getVersion(); + InboundMessage.RequestMessage nonHandshakeRequest = new InboundMessage.RequestMessage(request.threadContext, version, + status, request.getRequestId(), request.getActionName(), request.getFeatures(), request.getStreamInput()); + super.handleRequest(channel, nonHandshakeRequest, messageLengthBytes); } }; diff --git a/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java index 63cacfbb093a8..bb392554305c0 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/FakeTcpChannel.java @@ -31,9 +31,10 @@ public class FakeTcpChannel implements TcpChannel { private final InetSocketAddress localAddress; private final InetSocketAddress remoteAddress; private final String profile; - private final AtomicReference messageCaptor; private final ChannelStats stats = new ChannelStats(); private final CompletableContext closeContext = new CompletableContext<>(); + private final AtomicReference messageCaptor; + private final AtomicReference> listenerCaptor; public FakeTcpChannel() { this(false, "profile", new AtomicReference<>()); @@ -47,11 +48,6 @@ public FakeTcpChannel(boolean isServer, AtomicReference messageC this(isServer, "profile", messageCaptor); } - public FakeTcpChannel(boolean isServer, InetSocketAddress localAddress, InetSocketAddress remoteAddress, - AtomicReference messageCaptor) { - this(isServer, localAddress, remoteAddress,"profile", messageCaptor); - } - public FakeTcpChannel(boolean isServer, String profile, AtomicReference messageCaptor) { this(isServer, null, null, profile, messageCaptor); @@ -64,6 +60,7 @@ public FakeTcpChannel(boolean isServer, InetSocketAddress localAddress, InetSock this.remoteAddress = remoteAddress; this.profile = profile; this.messageCaptor = messageCaptor; + this.listenerCaptor = new AtomicReference<>(); } @Override @@ -89,6 +86,7 @@ public InetSocketAddress getRemoteAddress() { @Override public void sendMessage(BytesReference reference, ActionListener listener) { messageCaptor.set(reference); + listenerCaptor.set(listener); } @Override @@ -115,4 +113,12 @@ public boolean isOpen() { public ChannelStats getChannelStats() { return stats; } + + public AtomicReference getMessageCaptor() { + return messageCaptor; + } + + public AtomicReference> getListenerCaptor() { + return listenerCaptor; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index cbadcfa1f38ac..a7dbce45d3c5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -37,7 +37,7 @@ import org.elasticsearch.nio.BytesWriteHandler; import org.elasticsearch.nio.ChannelFactory; import 
org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelectorGroup; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; @@ -69,7 +69,7 @@ public class MockNioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(MockNioTransport.class); private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); - private volatile NioGroup nioGroup; + private volatile NioSelectorGroup nioGroup; private volatile MockTcpChannelFactory clientChannelFactory; public MockNioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, @@ -94,7 +94,7 @@ protected MockSocketChannel initiateChannel(DiscoveryNode node) throws IOExcepti protected void doStart() { boolean success = false; try { - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, + nioGroup = new NioSelectorGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, (s) -> new TestingSocketEventHandler(this::onNonChannelException, s)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); @@ -160,6 +160,11 @@ protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile con return builder.build(); } + private void onNonChannelException(Exception exception) { + logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), + exception); + } + private void exceptionCaught(NioSocketChannel channel, Exception exception) { onException((TcpChannel) channel, exception); }
diff --git a/x-pack/docs/en/rest-api/security/get-roles.asciidoc b/x-pack/docs/en/rest-api/security/get-roles.asciidoc index 9c0f5d589b815..c035c37cd07e8 100644 --- a/x-pack/docs/en/rest-api/security/get-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-roles.asciidoc @@ -57,9 +57,10 @@ role. If the role is not defined in the native realm, the request returns 404. { "names" : [ "index1", "index2" ], "privileges" : [ "all" ], - "field_security" : { + "allow_restricted_indices" : false, + "field_security" : { "grant" : [ "title", "body" ]} - } + } ], "applications" : [ ], "run_as" : [ "other_user" ], @@ -69,7 +70,7 @@ role. If the role is not defined in the native realm, the request returns 404. "transient_metadata": { "enabled": true } - } + } } -------------------------------------------------- // TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc index ee3d871c7943a..92f1081bc2b85 100644 --- a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -29,6 +29,11 @@ privilege is assigned to the user. `index`:: `names`::: (list) A list of indices. +`allow_restricted_indices`::: (boolean) If `names` contains internal restricted +indices that also have to be covered by the has-privileges check, then this has to be +set to `true`. By default this is `false` because restricted indices should +generally not be "visible" to APIs. For most use cases it is safe to ignore +this parameter. `privileges`::: (list) A list of the privileges that you want to check for the specified indices.
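For illustration, a minimal sketch of a has-privileges request that opts in to checking a restricted index. Only the `allow_restricted_indices` field comes from the change above; the endpoint path and the `.security` index name are assumptions for the example (older releases expose the API under `/_xpack/security/`):

-------------------------------------
GET /_security/user/_has_privileges
{
  "index" : [
    {
      "names" : [ ".security" ],
      "privileges" : [ "read" ],
      "allow_restricted_indices" : true
    }
  ]
}
-------------------------------------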
diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index aca6fc27b1dd8..b9b6d44fd69b7 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -54,7 +54,7 @@ added to an index directly as part of the index creation: [source,shell] ------------------------------------------------------------------------------- -PUT /2015?include_type_name=true +PUT /2015 { "aliases" : { "current_year" : {} diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index b9cd464241fa2..c16a80ff37d8b 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -141,7 +141,7 @@ integTestCluster { setting 'xpack.license.self_generated.type', 'trial' keystoreSetting 'bootstrap.password', 'x-pack-test-password' keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - distribution = 'zip' // this is important since we use the reindex module in ML + distribution = 'default' // this is important since we use the reindex module in ML setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser' diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index 38873262ed273..c890064504b51 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -12,7 +12,7 @@ task restTest(type: RestIntegTestTask) { } restTestCluster { - distribution 'zip' + distribution 'default' // Disable assertions in FollowingEngineAssertions, otherwise an AssertionError is thrown before // indexing a document directly in a follower index. In a rest test we like to test the exception // that is thrown in production when indexing a document directly in a follower index. 
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index aa63c804aba21..5b3e6c18ef29b 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -43,6 +43,12 @@ - is_true: follow_index_shards_acked - is_true: index_following_started + - do: + ccr.follow_stats: + index: _all + - length: { indices: 1 } + - match: { indices.0.index: "bar" } + # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents - do: ccr.follow_stats: @@ -77,3 +83,7 @@ index: bar - is_true: acknowledged + - do: + catch: missing + ccr.follow_stats: + index: unknown diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index b1d6467168c90..a1a0e25e126d0 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -418,12 +418,15 @@ private void updateSettings(final LongConsumer handler, final AtomicInteger retr private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable task) { assert e != null; - if (shouldRetry(params.getRemoteCluster(), e) && isStopped() == false) { - int currentRetry = retryCounter.incrementAndGet(); - LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", - params.getFollowShardId(), currentRetry), e); - long delay = computeDelay(currentRetry, params.getReadPollTimeout().getMillis()); - scheduler.accept(TimeValue.timeValueMillis(delay), task); + if (shouldRetry(params.getRemoteCluster(), e)) { + if (isStopped() == false) { + // Only retry if the shard follow task is not stopped.
+ int currentRetry = retryCounter.incrementAndGet(); + LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", + params.getFollowShardId(), currentRetry), e); + long delay = computeDelay(currentRetry, params.getReadPollTimeout().getMillis()); + scheduler.accept(TimeValue.timeValueMillis(delay), task); + } } else { fatalException = ExceptionsHelper.convertToElastic(e); LOGGER.warn("shard follow task encounter non-retryable error", e); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java index 8ab66aec8e80b..dc684fbc904ce 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -13,6 +14,7 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -65,6 +67,15 @@ protected void doExecute( listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; } + + if (Strings.isAllOrWildcard(request.indices()) == false) { + final ClusterState state = clusterService.state(); + Set shardFollowTaskFollowerIndices = findFollowerIndicesFromShardFollowTasks(state, request.indices()); + if (shardFollowTaskFollowerIndices.isEmpty()) { + String resources = String.join(",", request.indices()); + throw new ResourceNotFoundException("No shard follow tasks for follower indices [{}]", resources); + } + } super.doExecute(task, request, listener); } @@ -80,21 +91,7 @@ protected FollowStatsAction.StatsResponses newResponse( @Override protected void processTasks(final FollowStatsAction.StatsRequest request, final Consumer operation) { final ClusterState state = clusterService.state(); - final PersistentTasksCustomMetaData persistentTasksMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); - if (persistentTasksMetaData == null) { - return; - } - - final Set requestedFollowerIndices = request.indices() != null ? 
- new HashSet<>(Arrays.asList(request.indices())) : Collections.emptySet(); - final Set followerIndices = persistentTasksMetaData.tasks().stream() - .filter(persistentTask -> persistentTask.getTaskName().equals(ShardFollowTask.NAME)) - .map(persistentTask -> { - ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); - return shardFollowTask.getFollowShardId().getIndexName(); - }) - .filter(followerIndex -> requestedFollowerIndices.isEmpty() || requestedFollowerIndices.contains(followerIndex)) - .collect(Collectors.toSet()); + final Set followerIndices = findFollowerIndicesFromShardFollowTasks(state, request.indices()); for (final Task task : taskManager.getTasks().values()) { if (task instanceof ShardFollowNodeTask) { @@ -114,4 +111,22 @@ protected void taskOperation( listener.onResponse(new FollowStatsAction.StatsResponse(task.getStatus())); } + static Set findFollowerIndicesFromShardFollowTasks(ClusterState state, String[] indices) { + final PersistentTasksCustomMetaData persistentTasksMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + if (persistentTasksMetaData == null) { + return Collections.emptySet(); + } + + final Set requestedFollowerIndices = indices != null ? + new HashSet<>(Arrays.asList(indices)) : Collections.emptySet(); + return persistentTasksMetaData.tasks().stream() + .filter(persistentTask -> persistentTask.getTaskName().equals(ShardFollowTask.NAME)) + .map(persistentTask -> { + ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); + return shardFollowTask.getFollowShardId().getIndexName(); + }) + .filter(followerIndex -> Strings.isAllOrWildcard(indices) || requestedFollowerIndices.contains(followerIndex)) + .collect(Collectors.toSet()); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java index 409746f9d851b..1f1c6cd5c64e3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java @@ -6,9 +6,12 @@ package org.elasticsearch.xpack.ccr; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -116,4 +119,106 @@ public void testFollowStatsApiFollowerIndexFiltering() throws Exception { }); } + public void testFollowStatsApiResourceNotFound() throws Exception { + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(0)); + + statsRequest.setIndices(new String[] {"follower1"}); + Exception e = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet()); + assertThat(e.getMessage(), equalTo("No shard follow tasks for follower indices [follower1]")); + + final String leaderIndexSettings = 
getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest.setIndices(new String[] {"follower2"}); + e = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet()); + assertThat(e.getMessage(), equalTo("No shard follow tasks for follower indices [follower2]")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + + public void testFollowStatsApiIncludeShardFollowStatsWithRemovedFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest("follower1")).actionGet()); + + statsRequest = new FollowStatsAction.StatsRequest(); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + + public void testFollowStatsApiIncludeShardFollowStatsWithClosedFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, 
XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().admin().indices().close(new CloseIndexRequest("follower1")).actionGet()); + + statsRequest = new FollowStatsAction.StatsRequest(); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index cf7f901aaba33..9929241fc23c8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -40,7 +40,9 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; public class ShardFollowNodeTaskTests extends ESTestCase { @@ -287,6 +289,35 @@ public void testReceiveRetryableError() { assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } + public void testFatalExceptionNotSetWhenStoppingWhileFetchingOps() { + ShardFollowTaskParams params = new ShardFollowTaskParams(); + params.maxReadRequestOperationCount = 64; + params.maxOutstandingReadRequests = 1; + params.maxOutstandingWriteRequests = 1; + ShardFollowNodeTask task = createShardFollowTask(params); + startTask(task, 63, -1); + + readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); + + mappingVersions.add(1L); + leaderGlobalCheckpoints.add(63L); + maxSeqNos.add(63L); + responseSizes.add(64); + simulateResponse.set(true); + beforeSendShardChangesRequest = status -> { + // Cancel just before attempting to fetch operations: + task.onCancelled(); + }; + task.coordinateReads(); + + 
assertThat(task.isStopped(), is(true)); + ShardFollowNodeTaskStatus status = task.getStatus(); + assertThat(status.getFatalException(), nullValue()); + assertThat(status.failedReadRequests(), equalTo(1L)); + assertThat(status.successfulReadRequests(), equalTo(0L)); + assertThat(status.readExceptions().size(), equalTo(1)); + } + public void testEmptyShardChangesResponseShouldClearFetchException() { ShardFollowTaskParams params = new ShardFollowTaskParams(); params.maxReadRequestOperationCount = 64; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java new file mode 100644 index 0000000000000..bc8c58f1de7de --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class TransportFollowStatsActionTests extends ESTestCase { + + public void testFindFollowerIndicesFromShardFollowTasks() { + PersistentTasksCustomMetaData.Builder persistentTasks = PersistentTasksCustomMetaData.builder() + .addTask("1", ShardFollowTask.NAME, createShardFollowTask("abc"), null) + .addTask("2", ShardFollowTask.NAME, createShardFollowTask("def"), null); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_cluster")) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, persistentTasks.build()).build()) + .build(); + Set result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, null); + assertThat(result.size(), equalTo(2)); + assertThat(result.contains("abc"), is(true)); + assertThat(result.contains("def"), is(true)); + + result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, new String[]{"def"}); + assertThat(result.size(), equalTo(1)); + assertThat(result.contains("def"), is(true)); + + result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, new String[]{"ghi"}); + assertThat(result.size(), equalTo(0)); + } + + private static ShardFollowTask createShardFollowTask(String followerIndex) { + return new ShardFollowTask( + null, + new ShardId(followerIndex, "", 0), + new ShardId("leader_index", "", 0), + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + 1, + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + 1, + 10240, + new ByteSizeValue(512, ByteSizeUnit.MB), + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10), + Collections.emptyMap() + ); + } + +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index 2152bbe4f6cf8..3f9de8f1e68b3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -92,13 +92,14 @@ void addIndex(RoleDescriptor.IndicesPrivileges... privileges) { } public void addIndex(String[] indices, String[] privileges, String[] grantedFields, String[] deniedFields, - @Nullable BytesReference query) { + @Nullable BytesReference query, boolean allowRestrictedIndices) { this.indicesPrivileges.add(RoleDescriptor.IndicesPrivileges.builder() .indices(indices) .privileges(privileges) .grantedFields(grantedFields) .deniedFields(deniedFields) .query(query) + .allowRestrictedIndices(allowRestrictedIndices) .build()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java index 670deb2216bf6..623ca55b3cd6c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java @@ -64,8 +64,8 @@ public PutRoleRequestBuilder runAs(String... runAsUsers) { } public PutRoleRequestBuilder addIndices(String[] indices, String[] privileges, String[] grantedFields, String[] deniedFields, - @Nullable BytesReference query) { - request.addIndex(indices, privileges, grantedFields, deniedFields, query); + @Nullable BytesReference query, boolean allowRestrictedIndices) { + request.addIndex(indices, privileges, grantedFields, deniedFields, query, allowRestrictedIndices); return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java index 6a6f011a76ad2..b6d84d766c328 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.action.user; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -121,14 +122,17 @@ public static class Indices implements ToXContentObject, Writeable { private final Set privileges; private final Set fieldSecurity; private final Set queries; + private final boolean allowRestrictedIndices; public Indices(Collection indices, Collection privileges, - Set fieldSecurity, Set queries) { + Set fieldSecurity, Set queries, + boolean allowRestrictedIndices) { // The use of TreeSet is to provide a consistent order that can be relied upon in tests this.indices = Collections.unmodifiableSet(new TreeSet<>(Objects.requireNonNull(indices))); this.privileges = Collections.unmodifiableSet(new TreeSet<>(Objects.requireNonNull(privileges))); this.fieldSecurity = 
Collections.unmodifiableSet(Objects.requireNonNull(fieldSecurity)); this.queries = Collections.unmodifiableSet(Objects.requireNonNull(queries)); + this.allowRestrictedIndices = allowRestrictedIndices; } public Indices(StreamInput in) throws IOException { @@ -141,6 +145,11 @@ public Indices(StreamInput in) throws IOException { return new FieldPermissionsDefinition.FieldGrantExcludeGroup(grant, exclude); })); queries = Collections.unmodifiableSet(in.readSet(StreamInput::readBytesReference)); + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { + this.allowRestrictedIndices = in.readBoolean(); + } else { + this.allowRestrictedIndices = false; + } } public Set getIndices() { @@ -159,11 +168,16 @@ public Set getQueries() { return queries; } + public boolean allowRestrictedIndices() { + return allowRestrictedIndices; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(getClass().getSimpleName()) .append("[") .append("indices=[").append(Strings.collectionToCommaDelimitedString(indices)) + .append("], allow_restricted_indices=[").append(allowRestrictedIndices) .append("], privileges=[").append(Strings.collectionToCommaDelimitedString(privileges)) .append("]"); if (fieldSecurity.isEmpty() == false) { @@ -188,12 +202,13 @@ public boolean equals(Object o) { return this.indices.equals(that.indices) && this.privileges.equals(that.privileges) && this.fieldSecurity.equals(that.fieldSecurity) - && this.queries.equals(that.queries); + && this.queries.equals(that.queries) + && this.allowRestrictedIndices == that.allowRestrictedIndices; } @Override public int hashCode() { - return Objects.hash(indices, privileges, fieldSecurity, queries); + return Objects.hash(indices, privileges, fieldSecurity, queries, allowRestrictedIndices); } @Override @@ -222,6 +237,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); } + builder.field(RoleDescriptor.Fields.ALLOW_RESTRICTED_INDICES.getPreferredName(), allowRestrictedIndices); return builder.endObject(); } @@ -238,6 +254,9 @@ public void writeTo(StreamOutput out) throws IOException { output.writeOptionalStringArray(fields.getExcludedFields()); }); out.writeCollection(queries, StreamOutput::writeBytesReference); + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { + out.writeBoolean(allowRestrictedIndices); + } } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 9efbf985b832e..674711de88166 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -428,6 +428,7 @@ private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XCon String[] privileges = null; String[] grantedFields = null; String[] deniedFields = null; + boolean allowRestrictedIndices = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -444,6 +445,13 @@ private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XCon throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. 
expected field [{}] " + "value to be a string or an array of strings, but found [{}] instead", roleName, currentFieldName, token); } + } else if (Fields.ALLOW_RESTRICTED_INDICES.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_BOOLEAN) { + allowRestrictedIndices = parser.booleanValue(); + } else { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] " + + "value to be a boolean, but found [{}] instead", roleName, currentFieldName, token); + } } else if (Fields.QUERY.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.START_OBJECT) { XContentBuilder builder = JsonXContent.contentBuilder(); @@ -543,6 +551,7 @@ private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XCon .grantedFields(grantedFields) .deniedFields(deniedFields) .query(query) + .allowRestrictedIndices(allowRestrictedIndices) .build(); } @@ -590,6 +599,10 @@ public static class IndicesPrivileges implements ToXContentObject, Writeable { private String[] grantedFields = null; private String[] deniedFields = null; private BytesReference query; + // by default certain restricted indices are exempted when granting privileges, as they should generally be hidden for ordinary + // users. Setting this flag eliminates this special status, and any index name pattern in the permission will cover restricted + // indices as well. + private boolean allowRestrictedIndices = false; private IndicesPrivileges() { } @@ -600,6 +613,11 @@ public IndicesPrivileges(StreamInput in) throws IOException { this.deniedFields = in.readOptionalStringArray(); this.privileges = in.readStringArray(); this.query = in.readOptionalBytesReference(); + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { + allowRestrictedIndices = in.readBoolean(); + } else { + allowRestrictedIndices = false; + } } @Override @@ -609,6 +627,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalStringArray(deniedFields); out.writeStringArray(privileges); out.writeOptionalBytesReference(query); + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { + out.writeBoolean(allowRestrictedIndices); + } } public static Builder builder() { @@ -646,6 +667,10 @@ public boolean isUsingFieldLevelSecurity() { return hasDeniedFields() || hasGrantedFields(); } + public boolean allowRestrictedIndices() { + return allowRestrictedIndices; + } + private boolean hasDeniedFields() { return deniedFields != null && deniedFields.length > 0; } @@ -666,6 +691,7 @@ private boolean hasGrantedFields() { public String toString() { StringBuilder sb = new StringBuilder("IndicesPrivileges["); sb.append("indices=[").append(Strings.arrayToCommaDelimitedString(indices)); + sb.append("], allowRestrictedIndices=[").append(allowRestrictedIndices); sb.append("], privileges=[").append(Strings.arrayToCommaDelimitedString(privileges)); sb.append("], "); if (grantedFields != null || deniedFields != null) { @@ -702,6 +728,7 @@ public boolean equals(Object o) { IndicesPrivileges that = (IndicesPrivileges) o; if (!Arrays.equals(indices, that.indices)) return false; + if (allowRestrictedIndices != that.allowRestrictedIndices) return false; if (!Arrays.equals(privileges, that.privileges)) return false; if (!Arrays.equals(grantedFields, that.grantedFields)) return false; if (!Arrays.equals(deniedFields, that.deniedFields)) return false; @@ -711,6 +738,7 @@ public boolean equals(Object o) { @Override public int hashCode() { int result = 
Arrays.hashCode(indices); + result = 31 * result + (allowRestrictedIndices ? 1 : 0); result = 31 * result + Arrays.hashCode(privileges); result = 31 * result + Arrays.hashCode(grantedFields); result = 31 * result + Arrays.hashCode(deniedFields); @@ -736,6 +764,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (query != null) { builder.field("query", query.utf8ToString()); } + builder.field(RoleDescriptor.Fields.ALLOW_RESTRICTED_INDICES.getPreferredName(), allowRestrictedIndices); return builder.endObject(); } @@ -774,6 +803,11 @@ public Builder query(@Nullable String query) { return query(query == null ? null : new BytesArray(query)); } + public Builder allowRestrictedIndices(boolean allow) { + indicesPrivileges.allowRestrictedIndices = allow; + return this; + } + public Builder query(@Nullable BytesReference query) { if (query == null) { indicesPrivileges.query = null; @@ -954,6 +988,7 @@ public interface Fields { ParseField APPLICATIONS = new ParseField("applications"); ParseField RUN_AS = new ParseField("run_as"); ParseField NAMES = new ParseField("names"); + ParseField ALLOW_RESTRICTED_INDICES = new ParseField("allow_restricted_indices"); ParseField RESOURCES = new ParseField("resources"); ParseField QUERY = new ParseField("query"); ParseField PRIVILEGES = new ParseField("privileges"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 4936071ee8445..4c2a479721a2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -17,20 +17,21 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.support.Automatons; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.SortedMap; import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; +import java.util.concurrent.ConcurrentMap; import java.util.function.Predicate; import static java.util.Collections.unmodifiableMap; @@ -40,30 +41,19 @@ * A permission that is based on privileges for index related actions executed * on specific indices */ -public final class IndicesPermission implements Iterable<Group> { +public final class IndicesPermission { public static final IndicesPermission NONE = new IndicesPermission(); - private final Function<String, Predicate<String>> loadingFunction; - - private final ConcurrentHashMap<String, Predicate<String>> allowedIndicesMatchersForAction = new ConcurrentHashMap<>(); + private final ConcurrentMap<String, Predicate<String>> allowedIndicesMatchersForAction = new ConcurrentHashMap<>(); private final Group[] groups; public IndicesPermission(Group... groups) { this.groups = groups; - loadingFunction = (action) -> { - List<String> indices = new ArrayList<>(); - for (Group group : groups) { - if (group.actionMatcher.test(action)) { - indices.addAll(Arrays.asList(group.indices)); - } - } - return indexMatcher(indices); - }; } - static Predicate<String> indexMatcher(List<String> indices) { + static Predicate<String> indexMatcher(Collection<String> indices) { Set<String> exactMatch = new HashSet<>(); List<String> nonExactMatch = new ArrayList<>(); for (String indexPattern : indices) { @@ -106,11 +96,6 @@ private static Predicate<String> buildAutomataPredicate(List<String> indices) { } } - @Override - public Iterator<Group> iterator() { - return Arrays.asList(groups).iterator(); - } - public Group[] groups() { return groups; } @@ -120,7 +105,7 @@ public Group[] groups() { * has the privilege for executing the given action on. */ public Predicate<String> allowedIndicesMatcher(String action) { - return allowedIndicesMatchersForAction.computeIfAbsent(action, loadingFunction); + return allowedIndicesMatchersForAction.computeIfAbsent(action, a -> Group.buildIndexMatcherPredicateForAction(a, groups)); } /** @@ -232,15 +217,15 @@ public static class Group { private final Predicate<String> actionMatcher; private final String[] indices; private final Predicate<String> indexNameMatcher; - - public FieldPermissions getFieldPermissions() { - return fieldPermissions; - } - private final FieldPermissions fieldPermissions; private final Set<BytesReference> query; + // by default certain restricted indices are exempted when granting privileges, as they should generally be hidden for ordinary + // users. Setting this flag true eliminates the special status for the purpose of this permission - restricted indices still have + // to be covered by the "indices" patterns + private final boolean allowRestrictedIndices; - public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable Set<BytesReference> query, String... indices) { + public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable Set<BytesReference> query, + boolean allowRestrictedIndices, String... indices) { assert indices.length != 0; this.privilege = privilege; this.actionMatcher = privilege.predicate(); @@ -248,6 +233,7 @@ public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nulla this.indexNameMatcher = indexMatcher(Arrays.asList(indices)); this.fieldPermissions = Objects.requireNonNull(fieldPermissions); this.query = query; + this.allowRestrictedIndices = allowRestrictedIndices; } public IndexPrivilege privilege() { @@ -263,18 +249,66 @@ public Set<BytesReference> getQuery() { return query; } + public FieldPermissions getFieldPermissions() { + return fieldPermissions; + } + private boolean check(String action) { return actionMatcher.test(action); } private boolean check(String action, String index) { assert index != null; - return check(action) && indexNameMatcher.test(index); + return check(action) && (indexNameMatcher.test(index) + && (allowRestrictedIndices + // all good if it is not restricted + || (false == RestrictedIndicesNames.NAMES_SET.contains(index)) + // allow monitor as a special case, even for restricted + || IndexPrivilege.MONITOR.predicate().test(action))); } boolean hasQuery() { return query != null; } + + public boolean allowRestrictedIndices() { + return allowRestrictedIndices; + } + + public static Automaton buildIndexMatcherAutomaton(boolean allowRestrictedIndices, String...
indices) { + final Automaton indicesAutomaton = Automatons.patterns(indices); + if (allowRestrictedIndices) { + return indicesAutomaton; + } else { + return Automatons.minusAndMinimize(indicesAutomaton, RestrictedIndicesNames.NAMES_AUTOMATON); + } + } + + private static Predicate buildIndexMatcherPredicateForAction(String action, Group... groups) { + final Set ordinaryIndices = new HashSet<>(); + final Set restrictedIndices = new HashSet<>(); + for (final Group group : groups) { + if (group.actionMatcher.test(action)) { + if (group.allowRestrictedIndices) { + restrictedIndices.addAll(Arrays.asList(group.indices())); + } else { + ordinaryIndices.addAll(Arrays.asList(group.indices())); + } + } + } + final Predicate predicate; + if (restrictedIndices.isEmpty()) { + predicate = indexMatcher(ordinaryIndices) + .and(index -> false == RestrictedIndicesNames.NAMES_SET.contains(index)); + } else if (ordinaryIndices.isEmpty()) { + predicate = indexMatcher(restrictedIndices); + } else { + predicate = indexMatcher(restrictedIndices) + .or(indexMatcher(ordinaryIndices) + .and(index -> false == RestrictedIndicesNames.NAMES_SET.contains(index))); + } + return predicate; + } } private static class DocumentLevelPermissions { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index 8a68e71d0b93a..1f789e96d5a04 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -155,12 +155,13 @@ public Builder runAs(Privilege privilege) { } public Builder add(IndexPrivilege privilege, String... indices) { - groups.add(new IndicesPermission.Group(privilege, FieldPermissions.DEFAULT, null, indices)); + groups.add(new IndicesPermission.Group(privilege, FieldPermissions.DEFAULT, null, false, indices)); return this; } - public Builder add(FieldPermissions fieldPermissions, Set query, IndexPrivilege privilege, String... indices) { - groups.add(new IndicesPermission.Group(privilege, fieldPermissions, query, indices)); + public Builder add(FieldPermissions fieldPermissions, Set query, IndexPrivilege privilege, + boolean allowRestrictedIndices, String... indices) { + groups.add(new IndicesPermission.Group(privilege, fieldPermissions, query, allowRestrictedIndices, indices)); return this; } @@ -189,11 +190,8 @@ static List convertFromIndicesPrivileges(RoleDescriptor new FieldPermissionsDefinition(privilege.getGrantedFields(), privilege.getDeniedFields())); } final Set query = privilege.getQuery() == null ? 
null : Collections.singleton(privilege.getQuery()); - list.add(new IndicesPermission.Group(IndexPrivilege.get(Sets.newHashSet(privilege.getPrivileges())), - fieldPermissions, - query, - privilege.getIndices())); - + list.add(new IndicesPermission.Group(IndexPrivilege.get(Sets.newHashSet(privilege.getPrivileges())), fieldPermissions, + query, privilege.allowRestrictedIndices(), privilege.getIndices())); } return list; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 583a060ddbc6d..f15e078f9a941 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -32,7 +32,7 @@ public class ReservedRolesStore implements BiConsumer, ActionListene public static final RoleDescriptor SUPERUSER_ROLE_DESCRIPTOR = new RoleDescriptor("superuser", new String[] { "all" }, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build()}, + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").allowRestrictedIndices(true).build()}, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("*").privileges("*").resources("*").build() }, @@ -43,11 +43,7 @@ public class ReservedRolesStore implements BiConsumer, ActionListene private static Map initializeReservedRoles() { return MapBuilder.newMapBuilder() - .put("superuser", new RoleDescriptor("superuser", new String[] { "all" }, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build()}, - new String[] { "*" }, - MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("superuser", SUPERUSER_ROLE_DESCRIPTOR) .put("transport_client", new RoleDescriptor("transport_client", new String[] { "transport_client" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put("kibana_user", new RoleDescriptor("kibana_user", null, new RoleDescriptor.IndicesPrivileges[] { @@ -82,8 +78,10 @@ private static Map initializeReservedRoles() { "monitor" }, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("monitor").build(), - RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("read").build() + RoleDescriptor.IndicesPrivileges.builder() + .indices("*").privileges("monitor").allowRestrictedIndices(true).build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(".kibana*").privileges("read").build() }, null, null, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java new file mode 100644 index 0000000000000..fc03831d1445b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.security.index; + +import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion; + +import java.util.Collections; +import java.util.Set; + +public final class RestrictedIndicesNames { + public static final String AUDIT_INDEX_NAME_PREFIX = ".security_audit_log"; + public static final String INTERNAL_SECURITY_INDEX = ".security-" + IndexUpgradeCheckVersion.UPRADE_VERSION; + public static final String SECURITY_INDEX_NAME = ".security"; + + public static final Set NAMES_SET = Collections.unmodifiableSet(Sets.newHashSet(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); + public static final Automaton NAMES_AUTOMATON = Automatons.patterns(NAMES_SET); + + private RestrictedIndicesNames() { + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json index bac5930c0d5c9..3723aff9054de 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -88,6 +88,9 @@ }, "query" : { "type" : "keyword" + }, + "allow_restricted_indices" : { + "type" : "boolean" } } }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java index 34634c3972d56..c0848f5326c40 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityActionTests.java @@ -38,6 +38,7 @@ protected Reader instanceReader() { return SetPriorityAction::new; } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/37652") public void testNonPositivePriority() { Exception e = expectThrows(Exception.class, () -> new SetPriorityAction(randomIntBetween(-100, 0))); assertThat(e.getMessage(), equalTo("[priority] must be 0 or greater")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index a68a522f0242c..f642f3ab919d2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; @@ -59,7 +60,7 @@ public void testSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); if (randomBoolean()) { - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_7_0, Version.CURRENT); 
logger.info("Serializing with version {}", version); out.setVersion(version); } @@ -74,7 +75,32 @@ public void testSerialization() throws IOException { assertThat(copy.roleDescriptor(), equalTo(original.roleDescriptor())); } - public void testSerializationV63AndBefore() throws IOException { + public void testSerializationBetweenV64AndV66() throws IOException { + final PutRoleRequest original = buildRandomRequest(); + + final BytesStreamOutput out = new BytesStreamOutput(); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.V_6_6_0); + out.setVersion(version); + original.writeTo(out); + + final PutRoleRequest copy = new PutRoleRequest(); + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); + StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); + in.setVersion(version); + copy.readFrom(in); + + assertThat(copy.name(), equalTo(original.name())); + assertThat(copy.cluster(), equalTo(original.cluster())); + assertIndicesSerializedRestricted(copy.indices(), original.indices()); + assertThat(copy.runAs(), equalTo(original.runAs())); + assertThat(copy.metadata(), equalTo(original.metadata())); + assertThat(copy.getRefreshPolicy(), equalTo(original.getRefreshPolicy())); + + assertThat(copy.applicationPrivileges(), equalTo(original.applicationPrivileges())); + assertThat(copy.conditionalClusterPrivileges(), equalTo(original.conditionalClusterPrivileges())); + } + + public void testSerializationV60AndV32() throws IOException { final PutRoleRequest original = buildRandomRequest(); final BytesStreamOutput out = new BytesStreamOutput(); @@ -89,7 +115,7 @@ public void testSerializationV63AndBefore() throws IOException { assertThat(copy.name(), equalTo(original.name())); assertThat(copy.cluster(), equalTo(original.cluster())); - assertThat(copy.indices(), equalTo(original.indices())); + assertIndicesSerializedRestricted(copy.indices(), original.indices()); assertThat(copy.runAs(), equalTo(original.runAs())); assertThat(copy.metadata(), equalTo(original.metadata())); assertThat(copy.getRefreshPolicy(), equalTo(original.getRefreshPolicy())); @@ -98,6 +124,18 @@ public void testSerializationV63AndBefore() throws IOException { assertThat(copy.conditionalClusterPrivileges(), arrayWithSize(0)); } + private void assertIndicesSerializedRestricted(RoleDescriptor.IndicesPrivileges[] copy, RoleDescriptor.IndicesPrivileges[] original) { + assertThat(copy.length, equalTo(original.length)); + for (int i = 0; i < copy.length; i++) { + assertThat(copy[i].allowRestrictedIndices(), equalTo(false)); + assertThat(copy[i].getIndices(), equalTo(original[i].getIndices())); + assertThat(copy[i].getPrivileges(), equalTo(original[i].getPrivileges())); + assertThat(copy[i].getDeniedFields(), equalTo(original[i].getDeniedFields())); + assertThat(copy[i].getGrantedFields(), equalTo(original[i].getGrantedFields())); + assertThat(copy[i].getQuery(), equalTo(original[i].getQuery())); + } + } + private void assertSuccessfulValidation(PutRoleRequest request) { final ActionRequestValidationException exception = request.validate(); assertThat(exception, nullValue()); @@ -135,7 +173,8 @@ private PutRoleRequest buildRandomRequest() { randomSubsetOf(randomIntBetween(1, 2), "read", "write", "index", "all").toArray(Strings.EMPTY_ARRAY), generateRandomStringArray(randomIntBetween(1, 3), randomIntBetween(3, 8), true), 
generateRandomStringArray(randomIntBetween(1, 3), randomIntBetween(3, 8), true), - null + null, + randomBoolean() ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java index 5804230bcddab..a9e60fff3a167 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java @@ -77,8 +77,9 @@ public GetUserPrivilegesResponse mutate(GetUserPrivilegesResponse original) { final Set cluster = maybeMutate(random, 0, original.getClusterPrivileges(), () -> randomAlphaOfLength(5)); final Set conditionalCluster = maybeMutate(random, 1, original.getConditionalClusterPrivileges(), () -> new ManageApplicationPrivileges(randomStringSet(3))); - final Set index = maybeMutate(random, 2, original.getIndexPrivileges(), - () -> new GetUserPrivilegesResponse.Indices(randomStringSet(1), randomStringSet(1), emptySet(), emptySet())); + final Set index = maybeMutate(random, 2, original.getIndexPrivileges(), + () -> new GetUserPrivilegesResponse.Indices(randomStringSet(1), randomStringSet(1), emptySet(), emptySet(), + randomBoolean())); final Set application = maybeMutate(random, 3, original.getApplicationPrivileges(), () -> ApplicationResourcePrivileges.builder().resources(generateRandomStringArray(3, 3, false, false)) .application(randomAlphaOfLength(5)).privileges(generateRandomStringArray(3, 5, false, false)).build()); @@ -110,7 +111,7 @@ private GetUserPrivilegesResponse randomResponse() { () -> new GetUserPrivilegesResponse.Indices(randomStringSet(6), randomStringSet(8), Sets.newHashSet(randomArray(3, FieldGrantExcludeGroup[]::new, () -> new FieldGrantExcludeGroup( generateRandomStringArray(3, 5, false, false), generateRandomStringArray(3, 5, false, false)))), - randomStringSet(3).stream().map(BytesArray::new).collect(Collectors.toSet()) + randomStringSet(3).stream().map(BytesArray::new).collect(Collectors.toSet()), randomBoolean() )) ); final Set application = Sets.newHashSet(randomArray(5, ApplicationResourcePrivileges[]::new, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 9c9e3ecd3c6cc..7ad03846c1d7e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -15,10 +15,15 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import 
org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.GetAction; @@ -94,6 +99,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.APMSystemUser; import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; @@ -115,6 +121,7 @@ import org.joda.time.DateTimeZone; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -177,6 +184,8 @@ public void testIngestAdminRole() { is(false)); assertThat(ingestAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(randomAlphaOfLengthBetween(8, 24)), is(false)); + + assertNoAccessAllowed(ingestAdminRole, RestrictedIndicesNames.NAMES_SET); } public void testKibanaSystemRole() { @@ -276,6 +285,8 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(false)); + + assertNoAccessAllowed(kibanaRole, RestrictedIndicesNames.NAMES_SET); } public void testKibanaUserRole() { @@ -325,6 +336,8 @@ public void testKibanaUserRole() { final String applicationWithRandomIndex = "kibana-.kibana_" + randomAlphaOfLengthBetween(8, 24); assertThat(kibanaUserRole.application().grants(new ApplicationPrivilege(applicationWithRandomIndex, "app-random-index", "all"), "*"), is(false)); + + assertNoAccessAllowed(kibanaUserRole, RestrictedIndicesNames.NAMES_SET); } public void testMonitoringUserRole() { @@ -366,6 +379,8 @@ public void testMonitoringUserRole() { assertThat(monitoringUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); assertThat(monitoringUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertThat(monitoringUserRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(true)); + + assertNoAccessAllowed(monitoringUserRole, RestrictedIndicesNames.NAMES_SET); } public void testRemoteMonitoringAgentRole() { @@ -424,6 +439,7 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(metricbeatIndex), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(metricbeatIndex), is(false)); + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.NAMES_SET); } public void testRemoteMonitoringCollectorRole() { @@ -470,6 +486,29 @@ public void testRemoteMonitoringCollectorRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(false)); 
assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(false)); }); + + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetSettingsAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesShardStoresAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpgradeStatusAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(RecoveryAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesStatsAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesSegmentsAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME) + .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.NAMES_SET); } public void testReportingUserRole() { @@ -508,6 +547,8 @@ public void testReportingUserRole() { assertThat(reportingUserRole.indices().allowedIndicesMatcher(UpdateAction.NAME).test(index), is(false)); assertThat(reportingUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); assertThat(reportingUserRole.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(false)); + + assertNoAccessAllowed(reportingUserRole, RestrictedIndicesNames.NAMES_SET); } public void testKibanaDashboardOnlyUserRole() { @@ -553,6 +594,8 @@ public void testKibanaDashboardOnlyUserRole() { final String applicationWithRandomIndex = "kibana-.kibana_" + randomAlphaOfLengthBetween(8, 24); assertThat(dashboardsOnlyUserRole.application().grants( new ApplicationPrivilege(applicationWithRandomIndex, "app-random-index", "all"), "*"), is(false)); + + assertNoAccessAllowed(dashboardsOnlyUserRole, RestrictedIndicesNames.NAMES_SET); } public void testSuperuserRole() { @@ -583,6 +626,12 @@ public void testSuperuserRole() { .putAlias(new AliasMetaData.Builder("ab").build()) .putAlias(new AliasMetaData.Builder("ba").build()) .build(), true) + 
.put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder(RestrictedIndicesNames.SECURITY_INDEX_NAME).build()) + .build(), true) .build(); FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); @@ -600,10 +649,19 @@ public void testSuperuserRole() { .authorize(UpdateSettingsAction.NAME, Sets.newHashSet("aaaaaa", "ba"), metaData, fieldPermissionsCache); assertThat(authzMap.get("aaaaaa").isGranted(), is(true)); assertThat(authzMap.get("b").isGranted(), is(true)); + authzMap = superuserRole.indices().authorize(randomFrom(IndexAction.NAME, DeleteIndexAction.NAME, SearchAction.NAME), + Sets.newHashSet(RestrictedIndicesNames.SECURITY_INDEX_NAME), metaData, fieldPermissionsCache); + assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(true)); + assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(true)); assertTrue(superuserRole.indices().check(SearchAction.NAME)); assertFalse(superuserRole.indices().check("unknown")); assertThat(superuserRole.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(true)); + + assertThat(superuserRole.indices().allowedIndicesMatcher(randomFrom(IndexAction.NAME, DeleteIndexAction.NAME, SearchAction.NAME)) + .test(RestrictedIndicesNames.SECURITY_INDEX_NAME), is(true)); + assertThat(superuserRole.indices().allowedIndicesMatcher(randomFrom(IndexAction.NAME, DeleteIndexAction.NAME, SearchAction.NAME)) + .test(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX), is(true)); } public void testLogstashSystemRole() { @@ -628,6 +686,8 @@ public void testLogstashSystemRole() { assertThat(logstashSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); assertThat(logstashSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); + + assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.NAMES_SET); } public void testBeatsAdminRole() { @@ -664,6 +724,8 @@ public void testBeatsAdminRole() { assertThat(beatsAdminRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); assertThat(beatsAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); assertThat(beatsAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); + + assertNoAccessAllowed(beatsAdminRole, RestrictedIndicesNames.NAMES_SET); } public void testBeatsSystemRole() { @@ -688,6 +750,8 @@ public void testBeatsSystemRole() { assertThat(logstashSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); assertThat(logstashSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); + + assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.NAMES_SET); } public void testAPMSystemRole() { @@ -712,6 +776,8 @@ public void testAPMSystemRole() { assertThat(APMSystemRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); assertThat(APMSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); + + assertNoAccessAllowed(APMSystemRole, RestrictedIndicesNames.NAMES_SET); } public void testMachineLearningAdminRole() { @@ -764,6 +830,8 @@ public void testMachineLearningAdminRole() { assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX); 
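// These restricted-name assertions hinge on the new boolean parameter on Role.Builder#add,
// which controls whether a grant also covers restricted indices. A hedged sketch using the
// overload this PR introduces (variable names here are hypothetical):
Role plainWildcard = Role.builder("plain")
        .add(FieldPermissions.DEFAULT, null, IndexPrivilege.ALL, false, "*")
        .build();
// plainWildcard.indices().allowedIndicesMatcher(SearchAction.NAME)
//         .test(RestrictedIndicesNames.SECURITY_INDEX_NAME) evaluates to false
Role unrestrictedWildcard = Role.builder("unrestricted")
        .add(FieldPermissions.DEFAULT, null, IndexPrivilege.ALL, true, "*")
        .build();
// unrestrictedWildcard.indices().allowedIndicesMatcher(SearchAction.NAME)
//         .test(RestrictedIndicesNames.SECURITY_INDEX_NAME) evaluates to true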
assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT); assertOnlyReadAllowed(role, AuditorField.NOTIFICATIONS_INDEX); + + assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } public void testMachineLearningUserRole() { @@ -816,6 +884,8 @@ public void testMachineLearningUserRole() { assertNoAccessAllowed(role, AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX); assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT); assertOnlyReadAllowed(role, AuditorField.NOTIFICATIONS_INDEX); + + assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } public void testWatcherAdminRole() { @@ -843,6 +913,8 @@ public void testWatcherAdminRole() { for (String index : new String[]{ Watch.INDEX, historyIndex, TriggeredWatchStoreField.INDEX_NAME }) { assertOnlyReadAllowed(role, index); } + + assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } public void testWatcherUserRole() { @@ -871,6 +943,8 @@ public void testWatcherUserRole() { for (String index : new String[]{ Watch.INDEX, historyIndex }) { assertOnlyReadAllowed(role, index); } + + assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } private void assertOnlyReadAllowed(Role role, String index) { @@ -883,6 +957,14 @@ private void assertOnlyReadAllowed(Role role, String index) { assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(false)); + + assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); + } + + private void assertNoAccessAllowed(Role role, Collection indices) { + for (String index : indices) { + assertNoAccessAllowed(role, index); + } } private void assertNoAccessAllowed(Role role, String index) { diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle index ff9bf98bce5f5..7a79d1c20d936 100644 --- a/x-pack/plugin/ilm/qa/rest/build.gradle +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -21,7 +21,7 @@ restTestRunner { } restTestCluster { - distribution 'zip' + distribution 'default' setting 'xpack.ilm.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 714da7cf11c35..86f615a259ff0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -76,6 +76,7 @@ import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackPlugin; @@ -286,6 +287,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw private final SetOnce securityActionFilter = new SetOnce<>(); private final SetOnce securityIndex = new SetOnce<>(); private final SetOnce indexAuditTrail = new SetOnce<>(); + private final SetOnce groupFactory = new 
SetOnce<>(); private final List bootstrapChecks; private final List securityExtensions = new ArrayList<>(); @@ -943,11 +945,12 @@ public Map> getTransports(Settings settings, ThreadP return Collections.emptyMap(); } + IPFilter ipFilter = this.ipFilter.get(); Map> transports = new HashMap<>(); transports.put(SecurityField.NAME4, () -> new SecurityNetty4ServerTransport(settings, Version.CURRENT, threadPool, - networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter.get(), getSslService())); - transports.put(SecurityField.NIO, () -> new SecurityNioTransport(settings, Version.CURRENT, threadPool, - networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter.get(), getSslService())); + networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter, getSslService())); + transports.put(SecurityField.NIO, () -> new SecurityNioTransport(settings, Version.CURRENT, threadPool, networkService, + pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, ipFilter, getSslService(), getNioGroupFactory(settings))); return Collections.unmodifiableMap(transports); } @@ -967,7 +970,7 @@ public Map> getHttpTransports(Settings set httpTransports.put(SecurityField.NAME4, () -> new SecurityNetty4HttpServerTransport(settings, networkService, bigArrays, ipFilter.get(), getSslService(), threadPool, xContentRegistry, dispatcher)); httpTransports.put(SecurityField.NIO, () -> new SecurityNioHttpServerTransport(settings, networkService, bigArrays, - pageCacheRecycler, threadPool, xContentRegistry, dispatcher, ipFilter.get(), getSslService())); + pageCacheRecycler, threadPool, xContentRegistry, dispatcher, ipFilter.get(), getSslService(), getNioGroupFactory(settings))); return httpTransports; } @@ -1122,4 +1125,14 @@ public void accept(DiscoveryNode node, ClusterState state) { public void reloadSPI(ClassLoader loader) { securityExtensions.addAll(SecurityExtension.loadExtensions(loader)); } + + private synchronized NioGroupFactory getNioGroupFactory(Settings settings) { + if (groupFactory.get() != null) { + assert groupFactory.get().getSettings().equals(settings) : "Different settings than originally provided"; + return groupFactory.get(); + } else { + groupFactory.set(new NioGroupFactory(settings, logger)); + return groupFactory.get(); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java index 518c9cb25a01a..b65f90ef6a45d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java @@ -90,7 +90,7 @@ GetUserPrivilegesResponse buildResponseObject(Role userRole) { } final Set indices = new LinkedHashSet<>(); - for (IndicesPermission.Group group : userRole.indices()) { + for (IndicesPermission.Group group : userRole.indices().groups()) { final Set queries = group.getQuery() == null ? Collections.emptySet() : group.getQuery(); final Set fieldSecurity = group.getFieldPermissions().hasFieldLevelSecurity() ? 
group.getFieldPermissions().getFieldPermissionsDefinition().getFieldGrantExcludeGroups() : Collections.emptySet(); @@ -98,7 +98,8 @@ GetUserPrivilegesResponse buildResponseObject(Role userRole) { Arrays.asList(group.indices()), group.privilege().name(), fieldSecurity, - queries + queries, + group.allowRestrictedIndices() )); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java index 20fa9f522e710..4856b9e172f5f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java @@ -122,7 +122,7 @@ private void checkPrivileges(HasPrivilegesRequest request, Role userRole, privileges.putAll(existing.getPrivileges()); } for (String privilege : check.getPrivileges()) { - if (testIndexMatch(index, privilege, userRole, predicateCache)) { + if (testIndexMatch(index, check.allowRestrictedIndices(), privilege, userRole, predicateCache)) { logger.debug(() -> new ParameterizedMessage("Role [{}] has [{}] on index [{}]", Strings.arrayToCommaDelimitedString(userRole.names()), privilege, index)); privileges.put(privilege, true); @@ -171,16 +171,17 @@ private void checkPrivileges(HasPrivilegesRequest request, Role userRole, listener.onResponse(new HasPrivilegesResponse(request.username(), allMatch, cluster, indices.values(), privilegesByApplication)); } - private boolean testIndexMatch(String checkIndex, String checkPrivilegeName, Role userRole, - Map predicateCache) { + private boolean testIndexMatch(String checkIndexPattern, boolean allowRestrictedIndices, String checkPrivilegeName, Role userRole, + Map predicateCache) { final IndexPrivilege checkPrivilege = IndexPrivilege.get(Collections.singleton(checkPrivilegeName)); - final Automaton checkIndexAutomaton = Automatons.patterns(checkIndex); + final Automaton checkIndexAutomaton = IndicesPermission.Group.buildIndexMatcherAutomaton(allowRestrictedIndices, checkIndexPattern); List privilegeAutomatons = new ArrayList<>(); for (IndicesPermission.Group group : userRole.indices().groups()) { - final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, g -> Automatons.patterns(g.indices())); - if (testIndex(checkIndexAutomaton, groupIndexAutomaton)) { + final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, + g -> IndicesPermission.Group.buildIndexMatcherAutomaton(g.allowRestrictedIndices(), g.indices())); + if (Operations.subsetOf(checkIndexAutomaton, groupIndexAutomaton)) { final IndexPrivilege rolePrivilege = group.privilege(); if (rolePrivilege.name().contains(checkPrivilegeName)) { return true; @@ -191,10 +192,6 @@ private boolean testIndexMatch(String checkIndex, String checkPrivilegeName, Rol return testPrivilege(checkPrivilege, Automatons.unionAndMinimize(privilegeAutomatons)); } - private static boolean testIndex(Automaton checkIndex, Automaton roleIndex) { - return Operations.subsetOf(checkIndex, roleIndex); - } - private static boolean testPrivilege(Privilege checkPrivilege, Automaton roleAutomaton) { return Operations.subsetOf(checkPrivilege.getAutomaton(), roleAutomaton); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 35a2f5340492d..6530451781db5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -67,9 +67,7 @@ import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authz.IndicesAndAliasesResolver.ResolvedIndices; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -87,7 +85,6 @@ public class AuthorizationService { public static final String ORIGINATING_ACTION_KEY = "_originating_action_name"; public static final String ROLE_NAMES_KEY = "_effective_role_names"; - private static final Predicate MONITOR_INDEX_PREDICATE = IndexPrivilege.MONITOR.predicate(); private static final Predicate SAME_USER_PRIVILEGE = Automatons.predicate( ChangePasswordAction.NAME, AuthenticateAction.NAME, HasPrivilegesAction.NAME, GetUserPrivilegesAction.NAME); @@ -290,7 +287,7 @@ public void authorize(Authentication authentication, String action, TransportReq } final MetaData metaData = clusterService.state().metaData(); - final AuthorizedIndices authorizedIndices = new AuthorizedIndices(authentication.getUser(), permission, action, metaData); + final AuthorizedIndices authorizedIndices = new AuthorizedIndices(permission, action, metaData); final ResolvedIndices resolvedIndices = resolveIndexNames(auditId, authentication, action, request, metaData, authorizedIndices, permission); assert !resolvedIndices.isEmpty() @@ -312,18 +309,10 @@ public void authorize(Authentication authentication, String action, TransportReq final Set localIndices = new HashSet<>(resolvedIndices.getLocal()); IndicesAccessControl indicesAccessControl = permission.authorize(action, localIndices, metaData, fieldPermissionsCache); - if (!indicesAccessControl.isGranted()) { - throw denial(auditId, authentication, action, request, permission.names()); - } else if (hasSecurityIndexAccess(indicesAccessControl) - && MONITOR_INDEX_PREDICATE.test(action) == false - && isSuperuser(authentication.getUser()) == false) { - // only the XPackUser is allowed to work with this index, but we should allow indices monitoring actions through for debugging - // purposes. 
These monitor requests also sometimes resolve indices concretely and then requests them - logger.debug("user [{}] attempted to directly perform [{}] against the security index [{}]", - authentication.getUser().principal(), action, SecurityIndexManager.SECURITY_INDEX_NAME); - throw denial(auditId, authentication, action, request, permission.names()); - } else { + if (indicesAccessControl.isGranted()) { putTransientIfNonExisting(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); + } else { + throw denial(auditId, authentication, action, request, permission.names()); } //if we are creating an index we need to authorize potential aliases created at the same time @@ -359,16 +348,6 @@ private boolean isInternalUser(User user) { return SystemUser.is(user) || XPackUser.is(user) || XPackSecurityUser.is(user); } - private boolean hasSecurityIndexAccess(IndicesAccessControl indicesAccessControl) { - for (String index : SecurityIndexManager.indexNames()) { - final IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(index); - if (indexPermissions != null && indexPermissions.isGranted()) { - return true; - } - } - return false; - } - /** * Performs authorization checks on the items within a {@link BulkShardRequest}. * This inspects the {@link BulkItemRequest items} within the request, computes @@ -602,11 +581,6 @@ private ElasticsearchSecurityException denialException(Authentication authentica return authorizationError("action [{}] is unauthorized for user [{}]", action, authUser.principal()); } - static boolean isSuperuser(User user) { - return Arrays.stream(user.roles()) - .anyMatch(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()::equals); - } - public static void addSettings(List> settings) { settings.add(ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java index 3068a3993d309..0d173245e87f0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java @@ -8,8 +8,6 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.xpack.core.security.authz.permission.Role; -import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; import java.util.Collections; @@ -17,21 +15,17 @@ import java.util.Map; import java.util.function.Predicate; -import static org.elasticsearch.xpack.security.authz.AuthorizationService.isSuperuser; - /** * Abstraction used to make sure that we lazily load authorized indices only when requested and only maximum once per request. Also * makes sure that authorized indices don't get updated throughout the same request for the same user. 
*/ class AuthorizedIndices { - private final User user; private final String action; private final MetaData metaData; private final Role userRoles; private List authorizedIndices; - AuthorizedIndices(User user, Role userRoles, String action, MetaData metaData) { - this.user = user; + AuthorizedIndices(Role userRoles, String action, MetaData metaData) { this.userRoles = userRoles; this.action = action; this.metaData = metaData; @@ -56,10 +50,6 @@ private List load() { } } - if (isSuperuser(user) == false) { - // we should filter out all of the security indices from wildcards - indicesAndAliases.removeAll(SecurityIndexManager.indexNames()); - } return Collections.unmodifiableList(indicesAndAliases); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 6168192d4077f..1982bbd48b5f9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -240,7 +240,8 @@ public static void buildRoleFromDescriptors(Collection roleDescr Set clusterPrivileges = new HashSet<>(); final List conditionalClusterPrivileges = new ArrayList<>(); Set runAs = new HashSet<>(); - Map, MergeableIndicesPrivilege> indicesPrivilegesMap = new HashMap<>(); + final Map, MergeableIndicesPrivilege> restrictedIndicesPrivilegesMap = new HashMap<>(); + final Map, MergeableIndicesPrivilege> indicesPrivilegesMap = new HashMap<>(); // Keyed by application + resource Map>, Set> applicationPrivilegesMap = new HashMap<>(); @@ -257,26 +258,8 @@ public static void buildRoleFromDescriptors(Collection roleDescr if (descriptor.getRunAs() != null) { runAs.addAll(Arrays.asList(descriptor.getRunAs())); } - IndicesPrivileges[] indicesPrivileges = descriptor.getIndicesPrivileges(); - for (IndicesPrivileges indicesPrivilege : indicesPrivileges) { - Set key = newHashSet(indicesPrivilege.getIndices()); - // if a index privilege is an explicit denial, then we treat it as non-existent since we skipped these in the past when - // merging - final boolean isExplicitDenial = - indicesPrivileges.length == 1 && "none".equalsIgnoreCase(indicesPrivilege.getPrivileges()[0]); - if (isExplicitDenial == false) { - indicesPrivilegesMap.compute(key, (k, value) -> { - if (value == null) { - return new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), - indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery()); - } else { - value.merge(new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), - indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery())); - return value; - } - }); - } - } + MergeableIndicesPrivilege.collatePrivilegesByIndices(descriptor.getIndicesPrivileges(), true, restrictedIndicesPrivilegesMap); + MergeableIndicesPrivilege.collatePrivilegesByIndices(descriptor.getIndicesPrivileges(), false, indicesPrivilegesMap); for (RoleDescriptor.ApplicationResourcePrivileges appPrivilege : descriptor.getApplicationPrivileges()) { Tuple> key = new Tuple<>(appPrivilege.getApplication(), newHashSet(appPrivilege.getResources())); applicationPrivilegesMap.compute(key, (k, v) -> { @@ -297,7 +280,12 @@ public static void 
buildRoleFromDescriptors(Collection<RoleDescriptor> roleDescr indicesPrivilegesMap.entrySet().forEach((entry) -> { MergeableIndicesPrivilege privilege = entry.getValue(); builder.add(fieldPermissionsCache.getFieldPermissions(privilege.fieldPermissionsDefinition), privilege.query, - IndexPrivilege.get(privilege.privileges), privilege.indices.toArray(Strings.EMPTY_ARRAY)); + IndexPrivilege.get(privilege.privileges), false, privilege.indices.toArray(Strings.EMPTY_ARRAY)); + }); + restrictedIndicesPrivilegesMap.entrySet().forEach((entry) -> { + MergeableIndicesPrivilege privilege = entry.getValue(); + builder.add(fieldPermissionsCache.getFieldPermissions(privilege.fieldPermissionsDefinition), privilege.query, + IndexPrivilege.get(privilege.privileges), true, privilege.indices.toArray(Strings.EMPTY_ARRAY)); }); if (applicationPrivilegesMap.isEmpty()) { @@ -412,6 +400,30 @@ void merge(MergeableIndicesPrivilege other) { this.query.addAll(other.query); } } + + private static void collatePrivilegesByIndices(IndicesPrivileges[] indicesPrivileges, boolean allowsRestrictedIndices, + Map<Set<String>, MergeableIndicesPrivilege> indicesPrivilegesMap) { + for (final IndicesPrivileges indicesPrivilege : indicesPrivileges) { + // if an index privilege is an explicit denial, then we treat it as non-existent since we skipped these in the past when + // merging + final boolean isExplicitDenial = indicesPrivileges.length == 1 + && "none".equalsIgnoreCase(indicesPrivilege.getPrivileges()[0]); + if (isExplicitDenial || (indicesPrivilege.allowRestrictedIndices() != allowsRestrictedIndices)) { + continue; + } + final Set<String> key = newHashSet(indicesPrivilege.getIndices()); + indicesPrivilegesMap.compute(key, (k, value) -> { + if (value == null) { + return new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), + indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery()); + } else { + value.merge(new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), + indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery())); + return value; + } + }); + } + } } private static final class RolesRetrievalResult { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java index a2dab36be7e76..9e0da2518835d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java @@ -25,6 +25,7 @@ import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.transport.SecurityHttpExceptionHandler; @@ -54,8 +55,8 @@ public class SecurityNioHttpServerTransport extends NioHttpServerTransport { public SecurityNioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, IPFilter ipFilter, -
SSLService sslService) { - super(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, dispatcher); + SSLService sslService, NioGroupFactory nioGroupFactory) { + super(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, dispatcher, nioGroupFactory); this.securityExceptionHandler = new SecurityHttpExceptionHandler(logger, lifecycle, (c, e) -> super.onException(c, e)); this.ipFilter = ipFilter; this.nioIpFilter = new NioIPFilter(ipFilter, IPFilter.HTTP_PROFILE_NAME); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index 7fdf946675ffd..a5b64c84c4306 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.transport.nio.NioTcpChannel; import org.elasticsearch.transport.nio.NioTcpServerChannel; import org.elasticsearch.transport.nio.NioTransport; @@ -76,8 +77,9 @@ public class SecurityNioTransport extends NioTransport { public SecurityNioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, @Nullable final IPFilter authenticator, - SSLService sslService) { - super(settings, version, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + SSLService sslService, NioGroupFactory groupFactory) { + super(settings, version, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, + groupFactory); this.authenticator = authenticator; this.sslService = sslService; this.sslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java index 79d1d19c50e8a..c7f5123c6a1b7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java @@ -51,7 +51,7 @@ public void setupForTests() { for (String role : roles) { c.preparePutRole(role) .cluster("none") - .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null) + .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null, randomBoolean()) .get(); logger.debug("--> created role [{}]", role); } @@ -83,7 +83,7 @@ public void testModifyingViaApiClearsCache() throws Exception { for (String role : toModify) { PutRoleResponse response = securityClient.preparePutRole(role) .cluster("none") - .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null) + .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null, randomBoolean()) .runAs(role) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE) .get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java index bf7ab102c2a21..08dce12167483 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java @@ -57,7 +57,7 @@ public void testInterceptorThrowsWhenFLSDLSEnabled() { } else { queries = null; } - Role role = Role.builder().add(fieldPermissions, queries, IndexPrivilege.ALL, "foo").build(); + Role role = Role.builder().add(fieldPermissions, queries, IndexPrivilege.ALL, randomBoolean(), "foo").build(); final String action = IndicesAliasesAction.NAME; IndicesAccessControl accessControl = new IndicesAccessControl(true, Collections.singletonMap("foo", new IndicesAccessControl.IndexAccessControl(true, fieldPermissions, queries))); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java index e956ad5e03151..c7835935825ac 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java @@ -60,7 +60,7 @@ public void testResizeRequestInterceptorThrowsWhenFLSDLSEnabled() { } else { queries = null; } - Role role = Role.builder().add(fieldPermissions, queries, IndexPrivilege.ALL, "foo").build(); + Role role = Role.builder().add(fieldPermissions, queries, IndexPrivilege.ALL, randomBoolean(), "foo").build(); final String action = randomFrom(ShrinkAction.NAME, ResizeAction.NAME); IndicesAccessControl accessControl = new IndicesAccessControl(true, Collections.singletonMap("foo", new IndicesAccessControl.IndexAccessControl(true, fieldPermissions, queries))); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java index 8ff5378cbfc22..40f467fcc1832 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java @@ -43,7 +43,7 @@ public void testBuildResponseObject() { .add( new FieldPermissions(new FieldPermissionsDefinition(new String[]{ "public.*" }, new String[0])), Collections.singleton(query), - IndexPrivilege.READ, "index-4", "index-5") + IndexPrivilege.READ, randomBoolean(), "index-4", "index-5") .addApplicationPrivilege(new ApplicationPrivilege("app01", "read", "data:read"), Collections.singleton("*")) .runAs(new Privilege(Sets.newHashSet("user01", "user02"), "user01", "user02")) .build(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java index 66bff81e5dd56..650ccc55e41b3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -130,8 +130,8 @@ public void testRetrieveRoles() throws Exception { c.preparePutRole(rname) .cluster("all", "none") .runAs("root", "nobody") - .addIndices(new String[]{"index"}, new String[]{"read"}, - new String[]{"body", "title"}, null, new BytesArray("{\"query\": {\"match_all\": {}}}")) + .addIndices(new String[] { "index" }, new String[] { "read" }, new String[] { "body", "title" }, null, + new BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) .get(); addedRoles.add(rname); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java index e94cdff423274..212fd4a8dab42 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java @@ -77,7 +77,8 @@ public void testRoleJson() throws Exception { assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createRoleJson(rd), equalTo("{\"cluster\":[]," + "\"indices\":[{\"names\":[\"i1\",\"i2\",\"i3\"]," + - "\"privileges\":[\"all\"],\"field_security\":{\"grant\":[\"body\"]}}]," + + "\"privileges\":[\"all\"],\"field_security\":{\"grant\":[\"body\"]}," + + "\"allow_restricted_indices\":false}]," + "\"applications\":[]," + "\"run_as\":[],\"metadata\":{},\"type\":\"role\"}")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index af337cdc718aa..4a925f028a524 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -104,7 +104,7 @@ public void setupAnonymousRoleIfNecessary() throws Exception { PutRoleResponse response = securityClient() .preparePutRole("native_anonymous") .cluster("ALL") - .addIndices(new String[]{"*"}, new String[]{"ALL"}, null, null, null) + .addIndices(new String[]{"*"}, new String[]{"ALL"}, null, null, null, randomBoolean()) .get(); assertTrue(response.isCreated()); } else { @@ -189,7 +189,7 @@ public void testAddAndGetRole() throws Exception { .cluster("all", "none") .runAs("root", "nobody") .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"query\": {\"match_all\": {}}}")) + new BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) .metadata(metadata) .get(); logger.error("--> waiting for .security index"); @@ -206,13 +206,13 @@ public void testAddAndGetRole() throws Exception { .cluster("all", "none") .runAs("root", "nobody") .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"query\": {\"match_all\": {}}}")) + new 
BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) .get(); c.preparePutRole("test_role3") .cluster("all", "none") .runAs("root", "nobody") .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"query\": {\"match_all\": {}}}")) + new BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) .get(); logger.info("--> retrieving all roles"); @@ -239,7 +239,7 @@ public void testAddUserAndRoleThenAuth() throws Exception { c.preparePutRole("test_role") .cluster("all") .addIndices(new String[] { "*" }, new String[] { "read" }, new String[]{"body", "title"}, null, - new BytesArray("{\"match_all\": {}}")) + new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); logger.error("--> creating user"); c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); @@ -334,7 +334,7 @@ public void testCreateAndUpdateRole() { c.preparePutRole("test_role") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"match_all\": {}}")) + new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); logger.error("--> creating user"); c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); @@ -349,7 +349,7 @@ public void testCreateAndUpdateRole() { c.preparePutRole("test_role") .cluster("none") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"match_all\": {}}")) + new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); if (anonymousEnabled && roleExists) { assertNoTimeout(client() @@ -369,7 +369,7 @@ public void testCreateAndUpdateRole() { c.preparePutRole("test_role") .cluster("none") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"match_all\": {}}")) + new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); getRolesResponse = c.prepareGetRoles().names("test_role").get(); assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); @@ -385,7 +385,7 @@ public void testAuthenticateWithDeletedRole() { c.preparePutRole("test_role") .cluster("all") .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, - new BytesArray("{\"match_all\": {}}")) + new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); c.preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role").get(); logger.error("--> waiting for .security index"); @@ -411,11 +411,11 @@ public void testPutUserWithoutPassword() { // create some roles client.preparePutRole("admin_role") .cluster("all") - .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null) + .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null, randomBoolean()) .get(); client.preparePutRole("read_role") .cluster("none") - .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null) + .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null, randomBoolean()) .get(); assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); @@ -516,7 +516,7 @@ public void testUsersAndRolesDoNotInterfereWithIndicesStats() throws Exception { } else { client.preparePutRole("read_role") .cluster("none") - .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null) + .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null, randomBoolean()) .get(); } @@ -642,7 +642,7 @@ public void testRolesUsageStats() throws Exception { SecurityClient client = new 
SecurityClient(client()); PutRoleResponse putRoleResponse = client.preparePutRole("admin_role") .cluster("all") - .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null) + .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null, randomBoolean()) .get(); assertThat(putRoleResponse.isCreated(), is(true)); roles++; @@ -660,7 +660,7 @@ public void testRolesUsageStats() throws Exception { } roleResponse = client.preparePutRole("admin_role_fls") .cluster("all") - .addIndices(new String[]{"*"}, new String[]{"all"}, grantedFields, deniedFields, null) + .addIndices(new String[]{"*"}, new String[]{"all"}, grantedFields, deniedFields, null, randomBoolean()) .get(); assertThat(roleResponse.isCreated(), is(true)); roles++; @@ -669,7 +669,7 @@ public void testRolesUsageStats() throws Exception { if (dls) { PutRoleResponse roleResponse = client.preparePutRole("admin_role_dls") .cluster("all") - .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, new BytesArray("{ \"match_all\": {} }")) + .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, new BytesArray("{\"match_all\": {}}"), randomBoolean()) .get(); assertThat(roleResponse.isCreated(), is(true)); roles++; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java index c48ac4568989b..ffb20fbf9ac91 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -16,11 +16,13 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; -import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -28,19 +30,18 @@ import java.util.Set; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.contains; public class AuthorizedIndicesTests extends ESTestCase { public void testAuthorizedIndicesUserWithoutRoles() { - User user = new User("test user"); - AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, Role.EMPTY, "", - MetaData.EMPTY_META_DATA); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(Role.EMPTY, "", MetaData.EMPTY_META_DATA); List list = authorizedIndices.get(); assertTrue(list.isEmpty()); } public void testAuthorizedIndicesUserWithSomeRoles() { - User user = new User("test user", "a_star", "b"); RoleDescriptor aStarRole = new RoleDescriptor("a_star", null, new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a*").privileges("all").build() }, null); 
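// For the descriptor-based tests in this file: IndicesPrivileges now carries an
// allow_restricted_indices flag, and leaving it unset keeps the default of false, so
// wildcard grants skip the security index. A sketch of the builder usage (hypothetical):
RoleDescriptor.IndicesPrivileges unrestrictedPrivs = RoleDescriptor.IndicesPrivileges.builder()
        .indices("*")
        .privileges("all")
        .allowRestrictedIndices(true) // omit to keep the default, which excludes restricted indices
        .build();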
RoleDescriptor bRole = new RoleDescriptor("b", null, @@ -58,55 +59,82 @@ public void testAuthorizedIndicesUserWithSomeRoles() { .putAlias(new AliasMetaData.Builder("ab").build()) .putAlias(new AliasMetaData.Builder("ba").build()) .build(), true) + .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder(RestrictedIndicesNames.SECURITY_INDEX_NAME).build()) + .build(), true) .build(); final PlainActionFuture future = new PlainActionFuture<>(); final Set descriptors = Sets.newHashSet(aStarRole, bRole); CompositeRolesStore.buildRoleFromDescriptors(descriptors, new FieldPermissionsCache(Settings.EMPTY), null, future); Role roles = future.actionGet(); - AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, roles, SearchAction.NAME, metaData); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(roles, SearchAction.NAME, metaData); List list = authorizedIndices.get(); assertThat(list, containsInAnyOrder("a1", "a2", "aaaaaa", "b", "ab")); assertFalse(list.contains("bbbbb")); assertFalse(list.contains("ba")); + assertThat(list, not(contains(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX))); + assertThat(list, not(contains(RestrictedIndicesNames.SECURITY_INDEX_NAME))); } public void testAuthorizedIndicesUserWithSomeRolesEmptyMetaData() { - User user = new User("test user", "role"); Role role = Role.builder("role").add(IndexPrivilege.ALL, "*").build(); - AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, MetaData.EMPTY_META_DATA); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(role, SearchAction.NAME, MetaData.EMPTY_META_DATA); List list = authorizedIndices.get(); assertTrue(list.isEmpty()); } - public void testSecurityIndicesAreRemovedFromRegularUser() { - User user = new User("test user", "user_role"); - Role role = Role.builder("user_role").add(IndexPrivilege.ALL, "*").cluster(ClusterPrivilege.ALL).build(); + public void testSecurityIndicesAreRestrictedForDefaultRole() { + Role role = Role.builder(randomFrom("user_role", ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName())) + .add(IndexPrivilege.ALL, "*") + .cluster(ClusterPrivilege.ALL) + .build(); Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) - .put(new IndexMetaData.Builder(SecurityIndexManager.SECURITY_INDEX_NAME).settings(indexSettings) - .numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder(RestrictedIndicesNames.SECURITY_INDEX_NAME).build()) + .build(), true) .build(); - AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, metaData); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(role, SearchAction.NAME, metaData); List list = authorizedIndices.get(); assertThat(list, containsInAnyOrder("an-index", "another-index")); + assertThat(list, not(contains(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX))); + assertThat(list, 
not(contains(RestrictedIndicesNames.SECURITY_INDEX_NAME))); } - public void testSecurityIndicesAreNotRemovedFromSuperUsers() { - User user = new User("admin", "kibana_user", "superuser"); - Role role = Role.builder("kibana_user+superuser").add(IndexPrivilege.ALL, "*").cluster(ClusterPrivilege.ALL).build(); + public void testSecurityIndicesAreNotRemovedFromUnrestrictedRole() { + Role role = Role.builder(randomAlphaOfLength(8)) + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.ALL, true, "*") + .cluster(ClusterPrivilege.ALL) + .build(); Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) - .put(new IndexMetaData.Builder(SecurityIndexManager.SECURITY_INDEX_NAME).settings(indexSettings) - .numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder(RestrictedIndicesNames.SECURITY_INDEX_NAME).build()) + .build(), true) .build(); - AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, metaData); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(role, SearchAction.NAME, metaData); List list = authorizedIndices.get(); - assertThat(list, containsInAnyOrder("an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME)); + assertThat(list, containsInAnyOrder("an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME, + SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + + AuthorizedIndices authorizedIndicesSuperUser = new AuthorizedIndices(ReservedRolesStore.SUPERUSER_ROLE, SearchAction.NAME, + metaData); + assertThat(authorizedIndicesSuperUser.get(), containsInAnyOrder("an-index", "another-index", + SecurityIndexManager.SECURITY_INDEX_NAME, SecurityIndexManager.INTERNAL_SECURITY_INDEX)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 83edb189e2935..2a1619655b315 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -1366,7 +1366,7 @@ public void testDynamicPutMappingRequestFromAlias() { private AuthorizedIndices buildAuthorizedIndices(User user, String action) { PlainActionFuture rolesListener = new PlainActionFuture<>(); authzService.roles(user, rolesListener); - return new AuthorizedIndices(user, rolesListener.actionGet(), action, metaData); + return new AuthorizedIndices(rolesListener.actionGet(), action, metaData); } public static IndexMetaData.Builder indexBuilder(String index) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java index 08e4b1123c70a..4708cbd3ffaac 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java @@ -48,10 +48,11 @@ public void testIndexGroup() throws Exception { RoleDescriptor.IndicesPrivileges privs = RoleDescriptor.IndicesPrivileges.builder() .indices("idx") .privileges("priv") + .allowRestrictedIndices(true) .build(); XContentBuilder b = jsonBuilder(); privs.toXContent(b, ToXContent.EMPTY_PARAMS); - assertEquals("{\"names\":[\"idx\"],\"privileges\":[\"priv\"]}", Strings.toString(b)); + assertEquals("{\"names\":[\"idx\"],\"privileges\":[\"priv\"],\"allow_restricted_indices\":true}", Strings.toString(b)); } public void testToString() throws Exception { @@ -80,7 +81,7 @@ public void testToString() throws Exception { assertThat(descriptor.toString(), is("Role[name=test, cluster=[all,none]" + ", global=[{APPLICATION:manage:applications=app01,app02}]" + - ", indicesPrivileges=[IndicesPrivileges[indices=[i1,i2], privileges=[read]" + + ", indicesPrivileges=[IndicesPrivileges[indices=[i1,i2], allowRestrictedIndices=[false], privileges=[read]" + ", field_security=[grant=[body,title], except=null], query={\"query\": {\"match_all\": {}}}],]" + ", applicationPrivileges=[ApplicationResourcePrivileges[application=my_app, privileges=[read,write], resources=[*]],]" + ", runAs=[sudo], metadata=[{}]]")); @@ -92,6 +93,7 @@ public void testToXContent() throws Exception { .indices("i1", "i2") .privileges("read") .grantedFields("body", "title") + .allowRestrictedIndices(randomBoolean()) .query("{\"query\": {\"match_all\": {}}}") .build() }; @@ -131,9 +133,9 @@ public void testParse() throws Exception { assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); q = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"], \"index\": [{\"names\": \"idx1\", \"privileges\": [\"p1\", " + - "\"p2\"]}, {\"names\": \"idx2\", \"privileges\": [\"p3\"], \"field_security\": " + + "\"p2\"]}, {\"names\": \"idx2\", \"allow_restricted_indices\": true, \"privileges\": [\"p3\"], \"field_security\": " + "{\"grant\": [\"f1\", \"f2\"]}}, {\"names\": " + - "\"idx2\", " + + "\"idx2\", \"allow_restricted_indices\": false," + "\"privileges\": [\"p3\"], \"field_security\": {\"grant\": [\"f1\", \"f2\"]}, \"query\": \"{\\\"match_all\\\": {}}\"}]}"; rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); assertEquals("test", rd.getName()); @@ -142,12 +144,13 @@ public void testParse() throws Exception { assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); q = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"], \"index\": [{\"names\": [\"idx1\",\"idx2\"], \"privileges\": " + - "[\"p1\", \"p2\"]}]}"; + "[\"p1\", \"p2\"], \"allow_restricted_indices\": true}]}"; rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); assertEquals("test", rd.getName()); assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); assertEquals(1, rd.getIndicesPrivileges().length); assertArrayEquals(new String[] { "idx1", "idx2" }, rd.getIndicesPrivileges()[0].getIndices()); + assertTrue(rd.getIndicesPrivileges()[0].allowRestrictedIndices()); assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); assertNull(rd.getIndicesPrivileges()[0].getQuery()); @@ -162,7 +165,7 @@ public void testParse() throws Exception { assertThat(rd.getMetadata().get("foo"), is("bar")); q = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"]," + - " \"index\": [{\"names\": 
[\"idx1\",\"idx2\"], \"privileges\": [\"p1\", \"p2\"]}]," + + " \"index\": [{\"names\": [\"idx1\",\"idx2\"], \"allow_restricted_indices\": false, \"privileges\": [\"p1\", \"p2\"]}]," + " \"applications\": [" + " {\"resources\": [\"object-123\",\"object-456\"], \"privileges\":[\"read\", \"delete\"], \"application\":\"app1\"}," + " {\"resources\": [\"*\"], \"privileges\":[\"admin\"], \"application\":\"app2\" }" + @@ -174,8 +177,9 @@ public void testParse() throws Exception { assertThat(rd.getClusterPrivileges(), arrayContaining("a", "b")); assertThat(rd.getIndicesPrivileges().length, equalTo(1)); assertThat(rd.getIndicesPrivileges()[0].getIndices(), arrayContaining("idx1", "idx2")); - assertThat(rd.getRunAs(), arrayContaining("m", "n")); + assertThat(rd.getIndicesPrivileges()[0].allowRestrictedIndices(), is(false)); assertThat(rd.getIndicesPrivileges()[0].getQuery(), nullValue()); + assertThat(rd.getRunAs(), arrayContaining("m", "n")); assertThat(rd.getApplicationPrivileges().length, equalTo(2)); assertThat(rd.getApplicationPrivileges()[0].getResources(), arrayContaining("object-123", "object-456")); assertThat(rd.getApplicationPrivileges()[0].getPrivileges(), arrayContaining("read", "delete")); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java index 276f7a35e14ca..f507edf97874f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -30,7 +30,7 @@ public class SecurityScrollTests extends SecurityIntegTestCase { public void testScrollIsPerUser() throws Exception { assertSecurityIndexActive(); securityClient().preparePutRole("scrollable") - .addIndices(new String[] { randomAlphaOfLengthBetween(4, 12) }, new String[] { "read" }, null, null, null) + .addIndices(new String[] { randomAlphaOfLengthBetween(4, 12) }, new String[] { "read" }, null, null, null, randomBoolean()) .get(); securityClient().preparePutUser("other", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), getFastStoredHashAlgoForTests(), "scrollable") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index 61724e44f6154..6dc6c6e9c78ce 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import java.io.IOException; import java.util.ArrayList; @@ -39,6 +40,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.is; public class IndicesPermissionTests extends ESTestCase { @@ -57,7 +59,8 @@ public void testAuthorize() { Set query = Collections.singleton(new 
BytesArray("{}")); String[] fields = new String[]{"_field"}; Role role = Role.builder("_role") - .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, "_index").build(); + .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_index") + .build(); IndicesAccessControl permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field")); @@ -67,7 +70,8 @@ public void testAuthorize() { // no document level security: role = Role.builder("_role") - .add(new FieldPermissions(fieldPermissionDef(fields, null)), null, IndexPrivilege.ALL, "_index").build(); + .add(new FieldPermissions(fieldPermissionDef(fields, null)), null, IndexPrivilege.ALL, randomBoolean(), "_index") + .build(); permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field")); @@ -75,7 +79,7 @@ public void testAuthorize() { assertThat(permissions.getIndexPermissions("_index").getQueries(), nullValue()); // no field level security: - role = Role.builder("_role").add(new FieldPermissions(), query, IndexPrivilege.ALL, "_index").build(); + role = Role.builder("_role").add(new FieldPermissions(), query, IndexPrivilege.ALL, randomBoolean(), "_index").build(); permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); @@ -84,7 +88,7 @@ public void testAuthorize() { // index group associated with an alias: role = Role.builder("_role") - .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, "_alias") + .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias") .build(); permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), md, fieldPermissionsCache); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); @@ -103,7 +107,8 @@ public void testAuthorize() { String[] allFields = randomFrom(new String[]{"*"}, new String[]{"foo", "*"}, new String[]{randomAlphaOfLengthBetween(1, 10), "*"}); role = Role.builder("_role") - .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, "_alias").build(); + .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias") + .build(); permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), md, fieldPermissionsCache); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); @@ -130,8 +135,9 @@ public void testAuthorize() { allFields = randomFrom(new String[]{"*"}, new String[]{"foo", "*"}, new String[]{randomAlphaOfLengthBetween(1, 10), "*"}); role = Role.builder("_role") - .add(new FieldPermissions(fieldPermissionDef(allFields, null)), fooQuery, IndexPrivilege.ALL, "_alias") - .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, 
IndexPrivilege.ALL, "_alias").build(); + .add(new FieldPermissions(fieldPermissionDef(allFields, null)), fooQuery, IndexPrivilege.ALL, randomBoolean(), "_alias") + .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias") + .build(); permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), md, fieldPermissionsCache); Set<BytesArray> bothQueries = Sets.union(fooQuery, query); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); @@ -165,8 +171,8 @@ public void testAuthorizeMultipleGroupsMixedDls() { Set<BytesArray> query = Collections.singleton(new BytesArray("{}")); String[] fields = new String[]{"_field"}; Role role = Role.builder("_role") - .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, "_index") - .add(new FieldPermissions(fieldPermissionDef(null, null)), null, IndexPrivilege.ALL, "*") + .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_index") + .add(new FieldPermissions(fieldPermissionDef(null, null)), null, IndexPrivilege.ALL, randomBoolean(), "*") .build(); IndicesAccessControl permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); assertThat(permissions.getIndexPermissions("_index"), notNullValue()); @@ -219,9 +225,10 @@ public void testCorePermissionAuthorize() { .build(); FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); - IndicesPermission.Group group1 = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, "a1"); + IndicesPermission.Group group1 = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, randomBoolean(), + "a1"); IndicesPermission.Group group2 = new IndicesPermission.Group(IndexPrivilege.ALL, - new FieldPermissions(fieldPermissionDef(null, new String[]{"denied_field"})), null, "a1"); + new FieldPermissions(fieldPermissionDef(null, new String[]{"denied_field"})), null, randomBoolean(), "a1"); IndicesPermission core = new IndicesPermission(group1, group2); Map<String, IndicesAccessControl> authzMap = core.authorize(SearchAction.NAME, Sets.newHashSet("a1", "ba"), metaData, fieldPermissionsCache); @@ -234,13 +241,15 @@ public void testCorePermissionAuthorize() { assertFalse(core.check("unknown")); // test with two indices - group1 = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, "a1"); + group1 = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, randomBoolean(), "a1"); group2 = new IndicesPermission.Group(IndexPrivilege.ALL, - new FieldPermissions(fieldPermissionDef(null, new String[]{"denied_field"})), null, "a1"); + new FieldPermissions(fieldPermissionDef(null, new String[]{"denied_field"})), null, randomBoolean(), "a1"); IndicesPermission.Group group3 = new IndicesPermission.Group(IndexPrivilege.ALL, - new FieldPermissions(fieldPermissionDef(new String[]{"*_field"}, new String[]{"denied_field"})), null, "a2"); + new FieldPermissions(fieldPermissionDef(new String[] { "*_field" }, new String[] { "denied_field" })), null, + randomBoolean(), "a2"); IndicesPermission.Group group4 = new IndicesPermission.Group(IndexPrivilege.ALL, - new FieldPermissions(fieldPermissionDef(new String[]{"*_field2"}, new String[]{"denied_field2"})), null, "a2"); + new FieldPermissions(fieldPermissionDef(new String[] { "*_field2" }, new String[] { "denied_field2" })), null, + randomBoolean(), "a2"); core = new IndicesPermission(group1, group2, group3, group4);
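// [Illustrative sketch, not part of the change] The randomBoolean() values passed above are
// safe because "a1" and "a2" are ordinary indices; the new allowRestrictedIndices flag only
// changes the outcome for names in RestrictedIndicesNames. A minimal sketch, using only
// constructors and calls that appear elsewhere in this diff:
//   IndicesPermission.Group g = new IndicesPermission.Group(IndexPrivilege.ALL,
//       new FieldPermissions(), null, /* allowRestrictedIndices */ false, "*");
//   boolean granted = new IndicesPermission(g)
//       .authorize(SearchAction.NAME, Sets.newHashSet(RestrictedIndicesNames.SECURITY_INDEX_NAME),
//           metaData, fieldPermissionsCache)
//       .get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted();   // expected: false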
authzMap = core.authorize(SearchAction.NAME, Sets.newHashSet("a1", "a2"), metaData, fieldPermissionsCache); assertFalse(authzMap.get("a1").getFieldPermissions().hasFieldLevelSecurity()); @@ -262,11 +271,41 @@ public void testErrorMessageIfIndexPatternIsTooComplex() { indices.add("*" + prefix + "*" + suffixBegin + "*"); } final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, - () -> new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, indices.toArray(Strings.EMPTY_ARRAY))); + () -> new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, randomBoolean(), + indices.toArray(Strings.EMPTY_ARRAY))); assertThat(e.getMessage(), containsString(indices.get(0))); assertThat(e.getMessage(), containsString("too complex to evaluate")); } + public void testSecurityIndicesPermissions() { + final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final MetaData metaData = new MetaData.Builder() + .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder(RestrictedIndicesNames.SECURITY_INDEX_NAME).build()) + .build(), true) + .build(); + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + + // allow_restricted_indices: false + IndicesPermission.Group group = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, false, "*"); + Map<String, IndicesAccessControl> authzMap = new IndicesPermission(group).authorize(SearchAction.NAME, + Sets.newHashSet(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME), metaData, + fieldPermissionsCache); + assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(false)); + assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(false)); + + // allow_restricted_indices: true + group = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, true, "*"); + authzMap = new IndicesPermission(group).authorize(SearchAction.NAME, + Sets.newHashSet(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME), metaData, + fieldPermissionsCache); + assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(true)); + assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(true)); + } + private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { return new FieldPermissionsDefinition(granted, denied); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 6801ffd6bdf84..2d78a8c946772 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -224,10 +224,17 @@ public void testNegativeLookupsAreCached() { return null; }).when(nativeRolesStore).getRoleDescriptors(isA(Set.class), any(ActionListener.class)); final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); + final NativePrivilegeStore nativePrivilegeStore = mock(NativePrivilegeStore.class); +
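// [Illustrative sketch, not part of the change] The stubbing that follows must complete the
// ActionListener passed as the third argument, otherwise CompositeRolesStore would wait
// forever for privileges that never arrive. The same Mockito pattern spelled out, with the
// listener payload type assumed from the cast in this diff:
//   doAnswer(invocation -> {
//       @SuppressWarnings("unchecked")
//       ActionListener<Collection<ApplicationPrivilegeDescriptor>> listener =
//           (ActionListener<Collection<ApplicationPrivilegeDescriptor>>) invocation.getArguments()[2];
//       listener.onResponse(Collections.emptyList());   // answer with "no privileges found"
//       return null;
//   }).when(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class));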
doAnswer((invocationOnMock) -> { + ActionListener<Collection<ApplicationPrivilegeDescriptor>> callback = + (ActionListener<Collection<ApplicationPrivilegeDescriptor>>) invocationOnMock.getArguments()[2]; + callback.onResponse(Collections.emptyList()); + return null; + }).when(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class)); final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, - mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), + nativePrivilegeStore, Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS)); verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor @@ -257,8 +264,9 @@ public void testNegativeLookupsAreCached() { if (getSuperuserRole && numberOfTimesToCall > 0) { // the superuser role was requested so we get the role descriptors again verify(reservedRolesStore, times(2)).accept(anySetOf(String.class), any(ActionListener.class)); + verify(nativePrivilegeStore).getPrivileges(isA(Set.class), isA(Set.class), any(ActionListener.class)); } - verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore); + verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore, nativePrivilegeStore); } public void testNegativeLookupsCacheDisabled() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java index 8a637c6943502..41c5c39765199 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java @@ -54,8 +54,6 @@ public void testBasicLicense() throws Exception { public void testBuildResponse() throws Exception { final RestGetUserPrivilegesAction.RestListener listener = new RestGetUserPrivilegesAction.RestListener(null); - - final Set cluster = new LinkedHashSet<>(Arrays.asList("monitor", "manage_ml", "manage_watcher")); final Set conditionalCluster = Collections.singleton( new ConditionalClusterPrivileges.ManageApplicationPrivileges(new LinkedHashSet<>(Arrays.asList("app01", "app02")))); @@ -68,10 +66,11 @@ public void testBuildResponse() throws Exception { new LinkedHashSet<>(Arrays.asList( new BytesArray("{ \"term\": { \"access\": \"public\" } }"), new BytesArray("{ \"term\": { \"access\": \"standard\" } }") - )) + )), + false ), new GetUserPrivilegesResponse.Indices(Arrays.asList("index-4"), Collections.singleton("all"), - Collections.emptySet(), Collections.emptySet() + Collections.emptySet(), Collections.emptySet(), true ) )); final Set application = Sets.newHashSet( @@ -100,8 +99,10 @@ public void testBuildResponse() throws Exception { "\"query\":[" + "\"{ \\\"term\\\": { \\\"access\\\": \\\"public\\\" } }\"," + "\"{ \\\"term\\\": { \\\"access\\\": \\\"standard\\\" } }\"" + - "]}," + - "{\"names\":[\"index-4\"],\"privileges\":[\"all\"]}" + + "]," + + "\"allow_restricted_indices\":false" + + "}," + + "{\"names\":[\"index-4\"],\"privileges\":[\"all\"],\"allow_restricted_indices\":true}" + "]," + "\"applications\":[" + "{\"application\":\"app01\",\"privileges\":[\"read\",\"write\"],\"resources\":[\"*\"]}," + diff --git
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java index 2f26456112a67..76048590cea99 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransportTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.nio.NioSelector; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -46,6 +47,7 @@ public class SecurityNioHttpServerTransportTests extends ESTestCase { private SSLService sslService; private Environment env; private InetSocketAddress address = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + private NioGroupFactory nioGroupFactory; @Before public void createSSLService() { @@ -67,10 +69,11 @@ public void testDefaultClientAuth() throws IOException { Settings settings = Settings.builder() .put(env.settings()) .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); + nioGroupFactory = new NioGroupFactory(settings, logger); sslService = new SSLService(settings, env); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); when(socketChannel.getRemoteAddress()).thenReturn(address); @@ -88,9 +91,10 @@ public void testOptionalClientAuth() throws IOException { .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) .put("xpack.security.http.ssl.client_authentication", value).build(); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); @@ -107,10 +111,11 @@ public void testRequiredClientAuth() throws IOException { .put(env.settings()) .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) .put("xpack.security.http.ssl.client_authentication", value).build(); + nioGroupFactory = new NioGroupFactory(settings, logger); sslService = new SSLService(settings, env); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new 
NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); @@ -128,9 +133,10 @@ public void testNoClientAuth() throws IOException { .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) .put("xpack.security.http.ssl.client_authentication", value).build(); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); @@ -146,9 +152,10 @@ public void testCustomSSLConfiguration() throws IOException { .put(env.settings()) .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); SecurityNioHttpServerTransport.SecurityHttpChannelFactory factory = transport.channelFactory(); SocketChannel socketChannel = mock(SocketChannel.class); when(socketChannel.getRemoteAddress()).thenReturn(address); @@ -161,9 +168,10 @@ public void testCustomSSLConfiguration() throws IOException { .put("xpack.security.http.ssl.supported_protocols", "TLSv1.2") .build(); sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + nioGroupFactory = new NioGroupFactory(settings, logger); transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); factory = transport.channelFactory(); channel = factory.createChannel(mock(NioSelector.class), socketChannel); SSLEngine customEngine = SSLEngineUtils.getSSLEngine(channel); @@ -183,11 +191,12 @@ public void testThatExceptionIsThrownWhenConfiguredWithoutSslKey() { .build(); env = TestEnvironment.newEnvironment(settings); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService)); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory)); assertThat(e.getMessage(), containsString("key 
must be provided")); } @@ -202,8 +211,9 @@ public void testNoExceptionWhenConfiguredWithoutSslKeySSLDisabled() { .build(); env = TestEnvironment.newEnvironment(settings); sslService = new SSLService(settings, env); + nioGroupFactory = new NioGroupFactory(settings, logger); SecurityNioHttpServerTransport transport = new SecurityNioHttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(PageCacheRecycler.class), mock(ThreadPool.class), - xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService); + xContentRegistry(), new NullDispatcher(), mock(IPFilter.class), sslService, nioGroupFactory); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java index 84d68ffd63e63..a30e1329432db 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.transport.nio.NioGroupFactory; import org.elasticsearch.xpack.security.transport.AbstractSimpleSecurityTransportTestCase; import java.util.Collections; @@ -34,7 +35,8 @@ public MockTransportService nioFromThreadPool(Settings settings, ThreadPool thre .put(settings) .put("xpack.security.transport.ssl.enabled", true).build(); Transport transport = new SecurityNioTransport(settings1, version, threadPool, networkService, new MockPageCacheRecycler(settings), - namedWriteableRegistry, new NoneCircuitBreakerService(), null, createSSLService(settings1)) { + namedWriteableRegistry, new NoneCircuitBreakerService(), null, createSSLService(settings1), + new NioGroupFactory(settings, logger)) { @Override public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index f2a9f9d15343a..73713f91231d6 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -68,7 +68,7 @@ Tuple>> nextPage(String cursor, RequestMeta meta) thro } boolean queryClose(String cursor) throws SQLException { - return httpClient.queryClose(cursor); + return httpClient.queryClose(cursor, Mode.JDBC); } InfoResponse serverInfo() throws SQLException { diff --git a/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java index 16ed3053ab46f..e7263ba1fc1ce 100644 --- a/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java @@ -28,6 +28,7 @@ import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static 
org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; /** * Tests specific to multiple nodes. @@ -111,7 +112,7 @@ private void assertCount(RestClient client, int count) throws IOException { expected.put("columns", singletonList(columnInfo(mode, "COUNT(*)", "long", JDBCType.BIGINT, 20))); expected.put("rows", singletonList(singletonList(count))); - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); request.setJsonEntity("{\"query\": \"SELECT COUNT(*) FROM test\"" + mode(mode) + "}"); Map actual = responseToMap(client.performRequest(request)); diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java index 86772a49372ba..d5e7e7cc5084c 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java @@ -32,6 +32,7 @@ import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -180,7 +181,7 @@ private static Map runSql(@Nullable String asUser, String mode, } private static Map runSql(@Nullable String asUser, HttpEntity entity) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); if (asUser != null) { RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("es-security-runas-user", asUser); diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java index 66492bab8f56f..0b71fe1c88cb1 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java @@ -37,6 +37,7 @@ import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; public class UserFunctionIT extends ESRestTestCase { @@ -178,7 +179,7 @@ private Map runSql(String asUser, String mode, String sql) throw } private Map runSql(String asUser, HttpEntity entity) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); if (asUser != null) { RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("es-security-runas-user", asUser); diff --git 
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java index 868c9584a0057..e346bfc649b23 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java @@ -28,8 +28,10 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLIENT_IDS; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; public abstract class SqlProtocolTestCase extends ESRestTestCase { @@ -79,62 +81,76 @@ public void testIPs() throws IOException { } public void testDateTimeIntervals() throws IOException { - assertQuery("SELECT INTERVAL '326' YEAR", "INTERVAL '326' YEAR", "interval_year", "P326Y", 7); - assertQuery("SELECT INTERVAL '50' MONTH", "INTERVAL '50' MONTH", "interval_month", "P50M", 7); - assertQuery("SELECT INTERVAL '520' DAY", "INTERVAL '520' DAY", "interval_day", "PT12480H", 23); - assertQuery("SELECT INTERVAL '163' HOUR", "INTERVAL '163' HOUR", "interval_hour", "PT163H", 23); - assertQuery("SELECT INTERVAL '163' MINUTE", "INTERVAL '163' MINUTE", "interval_minute", "PT2H43M", 23); - assertQuery("SELECT INTERVAL '223.16' SECOND", "INTERVAL '223.16' SECOND", "interval_second", "PT3M43.016S", 23); - assertQuery("SELECT INTERVAL '163-11' YEAR TO MONTH", "INTERVAL '163-11' YEAR TO MONTH", "interval_year_to_month", "P163Y11M", 7); - assertQuery("SELECT INTERVAL '163 12' DAY TO HOUR", "INTERVAL '163 12' DAY TO HOUR", "interval_day_to_hour", "PT3924H", 23); + assertQuery("SELECT INTERVAL '326' YEAR", "INTERVAL '326' YEAR", "interval_year", "P326Y", "+326-0", 7); + assertQuery("SELECT INTERVAL '50' MONTH", "INTERVAL '50' MONTH", "interval_month", "P50M", "+0-50", 7); + assertQuery("SELECT INTERVAL '520' DAY", "INTERVAL '520' DAY", "interval_day", "PT12480H", "+520 00:00:00.0", 23); + assertQuery("SELECT INTERVAL '163' HOUR", "INTERVAL '163' HOUR", "interval_hour", "PT163H", "+6 19:00:00.0", 23); + assertQuery("SELECT INTERVAL '163' MINUTE", "INTERVAL '163' MINUTE", "interval_minute", "PT2H43M", "+0 02:43:00.0", 23); + assertQuery("SELECT INTERVAL '223.16' SECOND", "INTERVAL '223.16' SECOND", "interval_second", "PT3M43.016S", "+0 00:03:43.16", 23); + assertQuery("SELECT INTERVAL '163-11' YEAR TO MONTH", "INTERVAL '163-11' YEAR TO MONTH", "interval_year_to_month", "P163Y11M", + "+163-11", 7); + assertQuery("SELECT INTERVAL '163 12' DAY TO HOUR", "INTERVAL '163 12' DAY TO HOUR", "interval_day_to_hour", "PT3924H", + "+163 12:00:00.0", 23); assertQuery("SELECT INTERVAL '163 12:39' DAY TO MINUTE", "INTERVAL '163 12:39' DAY TO MINUTE", "interval_day_to_minute", - "PT3924H39M", 23); + "PT3924H39M", "+163 12:39:00.0", 23); assertQuery("SELECT INTERVAL '163 12:39:59.163' DAY TO SECOND", "INTERVAL '163 12:39:59.163' DAY TO SECOND", - "interval_day_to_second", "PT3924H39M59.163S", 23); + "interval_day_to_second", "PT3924H39M59.163S", "+163 12:39:59.163", 23); assertQuery("SELECT INTERVAL -'163 23:39:56.23' DAY TO SECOND", "INTERVAL -'163 23:39:56.23' DAY TO SECOND", - "interval_day_to_second", "PT-3935H-39M-56.023S", 23); + "interval_day_to_second", "PT-3935H-39M-56.023S", "-163 23:39:56.23", 23); assertQuery("SELECT INTERVAL '163:39' HOUR TO MINUTE", 
"INTERVAL '163:39' HOUR TO MINUTE", "interval_hour_to_minute", - "PT163H39M", 23); + "PT163H39M", "+6 19:39:00.0", 23); assertQuery("SELECT INTERVAL '163:39:59.163' HOUR TO SECOND", "INTERVAL '163:39:59.163' HOUR TO SECOND", "interval_hour_to_second", - "PT163H39M59.163S", 23); + "PT163H39M59.163S", "+6 19:39:59.163", 23); assertQuery("SELECT INTERVAL '163:59.163' MINUTE TO SECOND", "INTERVAL '163:59.163' MINUTE TO SECOND", "interval_minute_to_second", - "PT2H43M59.163S", 23); + "PT2H43M59.163S", "+0 02:43:59.163", 23); } - @SuppressWarnings({ "unchecked" }) - private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) throws IOException { + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) + throws IOException { + assertQuery(sql, columnName, columnType, columnValue, null, displaySize); + } + + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, Object cliColumnValue, int displaySize) + throws IOException { for (Mode mode : Mode.values()) { - Map response = runSql(mode.toString(), sql); - List columns = (ArrayList) response.get("columns"); - assertEquals(1, columns.size()); + boolean isCliCheck = mode == CLI && cliColumnValue != null; + assertQuery(sql, columnName, columnType, isCliCheck ? cliColumnValue : columnValue, displaySize, mode); + } + } + + @SuppressWarnings({ "unchecked" }) + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize, Mode mode) + throws IOException { + Map response = runSql(mode.toString(), sql); + List columns = (ArrayList) response.get("columns"); + assertEquals(1, columns.size()); - Map column = (HashMap) columns.get(0); - assertEquals(columnName, column.get("name")); - assertEquals(columnType, column.get("type")); - if (mode != Mode.PLAIN) { - assertEquals(3, column.size()); - assertEquals(displaySize, column.get("display_size")); - } else { - assertEquals(2, column.size()); - } - - List rows = (ArrayList) response.get("rows"); - assertEquals(1, rows.size()); - List row = (ArrayList) rows.get(0); - assertEquals(1, row.size()); + Map column = (HashMap) columns.get(0); + assertEquals(columnName, column.get("name")); + assertEquals(columnType, column.get("type")); + if (Mode.isDriver(mode)) { + assertEquals(3, column.size()); + assertEquals(displaySize, column.get("display_size")); + } else { + assertEquals(2, column.size()); + } + + List rows = (ArrayList) response.get("rows"); + assertEquals(1, rows.size()); + List row = (ArrayList) rows.get(0); + assertEquals(1, row.size()); - // from xcontent we can get float or double, depending on the conversion - // method of the specific xcontent format implementation - if (columnValue instanceof Float && row.get(0) instanceof Double) { - assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); - } else { - assertEquals(columnValue, row.get(0)); - } + // from xcontent we can get float or double, depending on the conversion + // method of the specific xcontent format implementation + if (columnValue instanceof Float && row.get(0) instanceof Double) { + assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); + } else { + assertEquals(columnValue, row.get(0)); } } private Map runSql(String mode, String sql) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); String requestContent = "{\"query\":\"" + sql 
+ "\"" + mode(mode) + "}"; String format = randomFrom(XContentType.values()).name().toLowerCase(Locale.ROOT); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index e91f9fc727238..a5b8f5cb4766a 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -19,6 +19,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; + public abstract class JdbcTestUtils { public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; @@ -131,7 +133,7 @@ public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { data.add(entry); } - CliFormatter formatter = new CliFormatter(cols, data); + BasicFormatter formatter = new BasicFormatter(cols, data, CLI); logger.info("\n" + formatter.formatWithHeader(cols, data)); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 331c02ffcb193..6f070491df168 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -49,6 +49,8 @@ * user rather than to the JDBC driver or CLI. */ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTestCase { + + public static String SQL_QUERY_REST_ENDPOINT = org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; /** * Builds that map that is returned in the header for each column. */ @@ -309,7 +311,7 @@ private Map runSql(String mode, String sql, String suffix) throw } protected Map runSql(HttpEntity sql, String suffix) throws IOException { - Request request = new Request("POST", "/_sql" + suffix); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT + suffix); request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. request.addParameter("pretty", "true"); // Improves error reporting readability if (randomBoolean()) { @@ -640,7 +642,7 @@ private Tuple runSqlAsText(String sql, String accept) throws IOE * rather than the {@code format} parameter. */ private Tuple runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException { - Request request = new Request("POST", "/_sql" + suffix); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT + suffix); request.addParameter("error_trace", "true"); request.setEntity(entity); RequestOptions.Builder options = request.getOptions().toBuilder(); @@ -658,7 +660,7 @@ private Tuple runSqlAsText(String suffix, HttpEntity entity, Str * rather than an {@code Accept} header. 
*/ private Tuple runSqlAsTextFormat(String sql, String format) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); request.addParameter("error_trace", "true"); request.addParameter("format", format); request.setJsonEntity("{\"query\":\"" + sql + "\"}"); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java index 42f9e59840f86..4d864961e8ecd 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java @@ -26,6 +26,9 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_STATS_REST_ENDPOINT; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_TRANSLATE_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; public abstract class RestSqlUsageTestCase extends ESRestTestCase { @@ -226,7 +229,7 @@ private void index(List docs) throws IOException { } private Map getStats() throws UnsupportedOperationException, IOException { - Request request = new Request("GET", "/_sql/stats"); + Request request = new Request("GET", SQL_STATS_REST_ENDPOINT); Map responseAsMap; try (InputStream content = client().performRequest(request).getEntity().getContent()) { responseAsMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); @@ -236,7 +239,7 @@ private Map getStats() throws UnsupportedOperationException, IOE } private void runTranslate(String sql) throws IOException { - Request request = new Request("POST", "/_sql/translate"); + Request request = new Request("POST", SQL_TRANSLATE_REST_ENDPOINT); if (randomBoolean()) { // We default to JSON but we force it randomly for extra coverage request.addParameter("format", "json"); @@ -255,9 +258,10 @@ private void runSql(String sql) throws IOException { String mode = Mode.PLAIN.toString(); if (clientType.equals(ClientType.JDBC.toString())) { mode = Mode.JDBC.toString(); - } - if (clientType.startsWith(ClientType.ODBC.toString())) { + } else if (clientType.startsWith(ClientType.ODBC.toString())) { mode = Mode.ODBC.toString(); + } else if (clientType.equals(ClientType.CLI.toString())) { + mode = Mode.CLI.toString(); } runSql(mode, clientType, sql); @@ -276,7 +280,7 @@ private void assertTranslateQueryMetric(int expected, Map respon } private void runSql(String mode, String restClient, String sql) throws IOException { - Request request = new Request("POST", "/_sql"); + Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT); request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. 
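// [Illustrative note, not part of the change] The hardcoded "/_sql" paths in these tests are
// replaced with the shared constants from org.elasticsearch.xpack.sql.proto.Protocol, so a
// future endpoint rename only touches one place. The mapping, inferred from the strings this
// diff replaces:
//   new Request("POST", SQL_QUERY_REST_ENDPOINT);      // was: new Request("POST", "/_sql")
//   new Request("POST", SQL_TRANSLATE_REST_ENDPOINT);  // was: new Request("POST", "/_sql/translate")
//   new Request("GET", SQL_STATS_REST_ENDPOINT);       // was: new Request("GET", "/_sql/stats")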
request.addParameter("pretty", "true"); // Improves error reporting readability if (randomBoolean()) { diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7a4b68f8d857d --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index efbb9ada534a5..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad32720fe677becb93a26692338b63754613aa50 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java similarity index 77% rename from x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java rename to x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java index f3f63fd18a275..fec2a3ee621e1 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java @@ -14,26 +14,46 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Objects; +import java.util.function.Function; /** - * Formats {@link SqlQueryResponse} for the CLI. {@linkplain Writeable} so + * Formats {@link SqlQueryResponse} for the CLI and for the TEXT format. {@linkplain Writeable} so * that its state can be saved between pages of results. */ -public class CliFormatter implements Writeable { +public class BasicFormatter implements Writeable { /** * The minimum width for any column in the formatted results. */ private static final int MIN_COLUMN_WIDTH = 15; private int[] width; + + public enum FormatOption { + CLI(Objects::toString), + TEXT(StringUtils::toString); + + private final Function apply; + + FormatOption(Function apply) { + this.apply = l -> l == null ? null : apply.apply(l); + } + + public final String apply(Object l) { + return apply.apply(l); + } + } + + private final FormatOption formatOption; /** - * Create a new {@linkplain CliFormatter} for formatting responses similar + * Create a new {@linkplain BasicFormatter} for formatting responses similar * to the provided columns and rows. */ - public CliFormatter(List columns, List> rows) { + public BasicFormatter(List columns, List> rows, FormatOption formatOption) { // Figure out the column widths: // 1. Start with the widths of the column names + this.formatOption = formatOption; width = new int[columns.size()]; for (int i = 0; i < width.length; i++) { // TODO read the width from the data type? @@ -43,24 +63,24 @@ public CliFormatter(List columns, List> rows) { // 2. Expand columns to fit the largest value for (List row : rows) { for (int i = 0; i < width.length; i++) { - // TODO are we sure toString is correct here? What about dates that come back as longs. 
- // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - width[i] = Math.max(width[i], StringUtils.toString(row.get(i)).length()); + width[i] = Math.max(width[i], formatOption.apply(row.get(i)).length()); } } } - public CliFormatter(StreamInput in) throws IOException { + public BasicFormatter(StreamInput in) throws IOException { width = in.readIntArray(); + formatOption = in.readEnum(FormatOption.class); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeIntArray(width); + out.writeEnum(formatOption); } - + /** - * Format the provided {@linkplain SqlQueryResponse} for the CLI + * Format the provided {@linkplain SqlQueryResponse} for the set format * including the header lines. */ public String formatWithHeader(List<ColumnInfo> columns, List<List<Object>> rows) { @@ -103,7 +123,7 @@ public String formatWithHeader(List<ColumnInfo> columns, List<List<Object>> rows } /** - * Format the provided {@linkplain SqlQueryResponse} for the CLI + * Format the provided {@linkplain SqlQueryResponse} for the set format * without the header lines. */ public String formatWithoutHeader(List<List<Object>> rows) { @@ -116,9 +136,7 @@ private String formatWithoutHeader(StringBuilder sb, List<List<Object>> rows) { if (i > 0) { sb.append('|'); } - // TODO are we sure toString is correct here? What about dates that come back as longs. - // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - String string = StringUtils.toString(row.get(i)); + String string = formatOption.apply(row.get(i)); if (string.length() <= width[i]) { // Pad sb.append(string); @@ -159,12 +177,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - CliFormatter that = (CliFormatter) o; - return Arrays.equals(width, that.width); + BasicFormatter that = (BasicFormatter) o; + return Arrays.equals(width, that.width) && formatOption == that.formatOption; } @Override public int hashCode() { - return Arrays.hashCode(width); + return Objects.hash(width, formatOption); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 7301e86befa02..465b405d01ae8 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -24,6 +24,7 @@ import static java.util.Collections.unmodifiableList; import static org.elasticsearch.xpack.sql.action.AbstractSqlQueryRequest.CURSOR; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; /** * Response to perform an sql query */ @@ -36,6 +37,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject private List<ColumnInfo> columns; // TODO investigate reusing Page here - it probably is much more efficient private List<List<Object>> rows; + private static final String INTERVAL_CLASS_NAME = "Interval"; public SqlQueryResponse() { } @@ -173,7 +175,12 @@ public static XContentBuilder value(XContentBuilder builder, Mode mode, Object v ZonedDateTime zdt = (ZonedDateTime) value; // use the ISO format builder.value(StringUtils.toString(zdt)); - } else { + } else if (mode == CLI && value != null && value.getClass().getSuperclass().getSimpleName().equals(INTERVAL_CLASS_NAME)) { + // use the SQL format for intervals when sending back the response for CLI + // all other clients will receive ISO 8601 formatted intervals + builder.value(value.toString()); + } + else {
builder.value(value); } return builder; diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java index 4e41dddb46c3c..c9bdc8d670c6d 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java @@ -68,7 +68,7 @@ public void testClearCursorRequestParser() throws IOException { request = generateRequest("{\"cursor\" : \"whatever\", \"client_id\" : \"CLI\"}", SqlClearCursorRequest::fromXContent); - assertEquals("cli", request.clientId()); + assertNull(request.clientId()); assertEquals(Mode.PLAIN, request.mode()); assertEquals("whatever", request.getCursor()); diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java index 5cb8bc0a4cd36..86b5cf6c36ef2 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -5,26 +5,29 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.cli.CliTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; + public class ServerQueryCliCommand extends AbstractServerCliCommand { @Override protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) { SqlQueryResponse response = null; HttpClient cliClient = cliSession.getClient(); - CliFormatter cliFormatter; + BasicFormatter formatter; String data; try { response = cliClient.queryInit(line, cliSession.getFetchSize()); - cliFormatter = new CliFormatter(response.columns(), response.rows()); - data = cliFormatter.formatWithHeader(response.columns(), response.rows()); + formatter = new BasicFormatter(response.columns(), response.rows(), CLI); + data = formatter.formatWithHeader(response.columns(), response.rows()); while (true) { handleText(terminal, data); if (response.cursor().isEmpty()) { @@ -36,7 +39,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l terminal.println(cliSession.getFetchSeparator()); } response = cliSession.getClient().nextPage(response.cursor()); - data = cliFormatter.formatWithoutHeader(response.rows()); + data = formatter.formatWithoutHeader(response.rows()); } } catch (SQLException e) { if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) { @@ -46,7 +49,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l } if (response != null) { try { - cliClient.queryClose(response.cursor()); + cliClient.queryClose(response.cursor(), Mode.CLI); } catch (SQLException ex) { terminal.error("Could not close cursor", ex.getMessage()); } diff --git 
a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java index 498bbf7754c8d..9d4ded4a39c14 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; @@ -93,14 +94,14 @@ public void testCursorCleanupOnError() throws Exception { cliSession.setFetchSize(15); when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); when(client.nextPage("my_cursor1")).thenThrow(new SQLException("test exception")); - when(client.queryClose("my_cursor1")).thenReturn(true); + when(client.queryClose("my_cursor1", Mode.CLI)).thenReturn(true); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfirst \n" + "Bad request [test exception]\n", testTerminal.toString()); verify(client, times(1)).queryInit(eq("test query"), eq(15)); verify(client, times(1)).nextPage(any()); - verify(client, times(1)).queryClose(eq("my_cursor1")); + verify(client, times(1)).queryClose(eq("my_cursor1"), eq(Mode.CLI)); verifyNoMoreInteractions(client); } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index 4fe6a39820bc7..c3f35aefd65f4 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -36,8 +36,6 @@ import java.util.Collections; import java.util.function.Function; -import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLI; - /** * A specialized high-level REST client with support for SQL-related functions. * Similar to JDBC and the underlying HTTP connection, this class is not thread-safe @@ -65,10 +63,10 @@ public MainResponse serverInfo() throws SQLException { public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about - // method called only from CLI. "client_id" is set to "cli" + // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(query, Collections.emptyList(), null, ZoneId.of("Z"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), TimeValue.timeValueMillis(cfg.pageTimeout()), - new RequestInfo(Mode.PLAIN, CLI)); + new RequestInfo(Mode.CLI)); return query(sqlRequest); } @@ -77,15 +75,15 @@ public SqlQueryResponse query(SqlQueryRequest sqlRequest) throws SQLException { } public SqlQueryResponse nextPage(String cursor) throws SQLException { - // method called only from CLI. 
"client_id" is set to "cli" + // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(cursor, TimeValue.timeValueMillis(cfg.queryTimeout()), - TimeValue.timeValueMillis(cfg.pageTimeout()), new RequestInfo(Mode.PLAIN, CLI)); + TimeValue.timeValueMillis(cfg.pageTimeout()), new RequestInfo(Mode.CLI)); return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } - public boolean queryClose(String cursor) throws SQLException { + public boolean queryClose(String cursor, Mode mode) throws SQLException { SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT, - new SqlClearCursorRequest(cursor, new RequestInfo(Mode.PLAIN)), + new SqlClearCursorRequest(cursor, new RequestInfo(mode)), SqlClearCursorResponse::fromXContent); return response.isSucceeded(); } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java index 5b4991bfba1c0..59a6e82e9874d 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java @@ -35,6 +35,7 @@ import javax.sql.rowset.serial.SerialException; import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; /** * Low-level http client using the built-in {@link HttpURLConnection}. @@ -47,7 +48,8 @@ public class JreHttpUrlConnection implements Closeable { * error. */ public static final String SQL_STATE_BAD_SERVER = "bad_server"; - private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "request [/_sql] contains unrecognized parameter: [mode]"; + private static final String SQL_NOT_AVAILABLE_ERROR_MESSAGE = "request [" + SQL_QUERY_REST_ENDPOINT + + "] contains unrecognized parameter: [mode]"; public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { final URI uriPath = cfg.baseUri().resolve(path); // update path if needed diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java index a301b41d3887a..26eb4867b3509 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java @@ -12,6 +12,7 @@ * SQL protocol mode */ public enum Mode { + CLI, PLAIN, JDBC, ODBC; diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java index b860832856a82..97a3622a64f1f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java @@ -13,7 +13,6 @@ import java.util.Set; public class RequestInfo { - public static final String CLI = "cli"; private static final String CANVAS = "canvas"; public static final String ODBC_32 = "odbc32"; private static final String ODBC_64 = "odbc64"; @@ -22,7 +21,6 @@ public class RequestInfo { static { Set clientIds = new HashSet<>(4); - clientIds.add(CLI); clientIds.add(CANVAS); clientIds.add(ODBC_32); clientIds.add(ODBC_64); 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java
index 9f569206438d2..c80b399d447e6 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java
@@ -65,6 +65,7 @@ public class SqlPlugin extends Plugin implements ActionPlugin {
                 }
                 break;
             case PLAIN:
+            case CLI:
                 if (licenseState.isSqlAllowed() == false) {
                     throw LicenseUtils.newComplianceException(XPackField.SQL);
                 }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java
index 34c0f1c6d74f7..62963a99b2a98 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java
@@ -7,7 +7,7 @@

 import org.elasticsearch.common.Strings;
 import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.xpack.sql.action.CliFormatter;
+import org.elasticsearch.xpack.sql.action.BasicFormatter;
 import org.elasticsearch.xpack.sql.action.SqlQueryResponse;
 import org.elasticsearch.xpack.sql.proto.ColumnInfo;
 import org.elasticsearch.xpack.sql.session.Cursor;
@@ -21,6 +21,8 @@
 import java.util.Objects;
 import java.util.function.Function;

+import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT;
+
 /**
  * Templating class for displaying SQL responses in text formats.
  */
@@ -40,21 +42,21 @@ enum TextFormat {
     PLAIN_TEXT() {
         @Override
         String format(Cursor cursor, RestRequest request, SqlQueryResponse response) {
-            final CliFormatter formatter;
-            if (cursor instanceof CliFormatterCursor) {
-                formatter = ((CliFormatterCursor) cursor).getCliFormatter();
+            final BasicFormatter formatter;
+            if (cursor instanceof TextFormatterCursor) {
+                formatter = ((TextFormatterCursor) cursor).getFormatter();
                 return formatter.formatWithoutHeader(response.rows());
             } else {
-                formatter = new CliFormatter(response.columns(), response.rows());
+                formatter = new BasicFormatter(response.columns(), response.rows(), TEXT);
                 return formatter.formatWithHeader(response.columns(), response.rows());
             }
         }

         @Override
         Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) {
-            CliFormatter formatter = (oldCursor instanceof CliFormatterCursor) ?
-                    ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response.columns(), response.rows());
-            return CliFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter);
+            BasicFormatter formatter = (oldCursor instanceof TextFormatterCursor) ?
+                    ((TextFormatterCursor) oldCursor).getFormatter() : new BasicFormatter(response.columns(), response.rows(), TEXT);
+            return TextFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter);
         }

         @Override
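PLAIN_TEXT keeps column widths stable across pages by stashing the formatter in the outgoing cursor: the first page builds it and wraps it, later pages find it and skip the header. A compressed sketch of that round trip; `first`, `next`, and `scrollCursor` are hypothetical, and `scrollCursor` is assumed to be live (non-EMPTY) so the cast holds:

import org.elasticsearch.xpack.sql.action.BasicFormatter;
import org.elasticsearch.xpack.sql.action.SqlQueryResponse;
import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor;
import org.elasticsearch.xpack.sql.session.Cursor;

import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT;

class TextPagingSketch {
    static String[] twoPages(SqlQueryResponse first, SqlQueryResponse next, Cursor scrollCursor) {
        BasicFormatter formatter = new BasicFormatter(first.columns(), first.rows(), TEXT);
        // Page one: header plus rows, formatter rides along inside the cursor.
        Cursor wrapped = TextFormatterCursor.wrap(scrollCursor, formatter);
        String page1 = formatter.formatWithHeader(first.columns(), first.rows());
        // Page two: the wrapped cursor supplies the formatter, no header repeated.
        String page2 = ((TextFormatterCursor) wrapped).getFormatter().formatWithoutHeader(next.rows());
        return new String[] { page1, page2 };
    }
}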
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java
similarity index 78%
rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java
rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java
index b226e899e4d09..4ab1d77fe21ff 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.sql.action.CliFormatter;
+import org.elasticsearch.xpack.sql.action.BasicFormatter;
 import org.elasticsearch.xpack.sql.session.Configuration;
 import org.elasticsearch.xpack.sql.session.Cursor;
 import org.elasticsearch.xpack.sql.session.RowSet;
@@ -21,31 +21,31 @@
 /**
  * The cursor that wraps all necessary information for textual representation of the result table
  */
-public class CliFormatterCursor implements Cursor {
+public class TextFormatterCursor implements Cursor {
     public static final String NAME = "f";

     private final Cursor delegate;
-    private final CliFormatter formatter;
+    private final BasicFormatter formatter;

     /**
      * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new
-     * CliFormatterCursor that wraps the newCursor.
+     * TextFormatterCursor that wraps the newCursor.
      */
-    public static Cursor wrap(Cursor newCursor, CliFormatter formatter) {
+    public static Cursor wrap(Cursor newCursor, BasicFormatter formatter) {
         if (newCursor == EMPTY) {
             return EMPTY;
         }
-        return new CliFormatterCursor(newCursor, formatter);
+        return new TextFormatterCursor(newCursor, formatter);
     }

-    private CliFormatterCursor(Cursor delegate, CliFormatter formatter) {
+    private TextFormatterCursor(Cursor delegate, BasicFormatter formatter) {
         this.delegate = delegate;
         this.formatter = formatter;
     }

-    public CliFormatterCursor(StreamInput in) throws IOException {
+    public TextFormatterCursor(StreamInput in) throws IOException {
         delegate = in.readNamedWriteable(Cursor.class);
-        formatter = new CliFormatter(in);
+        formatter = new BasicFormatter(in);
     }

     @Override
@@ -54,7 +54,7 @@ public void writeTo(StreamOutput out) throws IOException {
         formatter.writeTo(out);
     }

-    public CliFormatter getCliFormatter() {
+    public BasicFormatter getFormatter() {
         return formatter;
     }

@@ -81,7 +81,7 @@ public boolean equals(Object o) {
         if (o == null || getClass() != o.getClass()) {
             return false;
         }
-        CliFormatterCursor that = (CliFormatterCursor) o;
+        TextFormatterCursor that = (TextFormatterCursor) o;
         return Objects.equals(delegate, that.delegate) &&
                 Objects.equals(formatter, that.formatter);
     }
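One behavior worth noting in wrap(): an exhausted cursor short-circuits to EMPTY, so no formatter is serialized once there is nothing left to page through. A minimal sketch, with `scrollCursor` and `formatter` as hypothetical inputs (`scrollCursor` assumed live):

import org.elasticsearch.xpack.sql.action.BasicFormatter;
import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor;
import org.elasticsearch.xpack.sql.session.Cursor;

class WrapSketch {
    static void demo(Cursor scrollCursor, BasicFormatter formatter) {
        // Exhausted input: nothing to format on a next page, so no wrapper is created.
        assert TextFormatterCursor.wrap(Cursor.EMPTY, formatter) == Cursor.EMPTY;
        // Live input: the wrapper carries the formatter to the next request.
        assert TextFormatterCursor.wrap(scrollCursor, formatter) instanceof TextFormatterCursor;
    }
}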
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
index f8d0393303d64..25989ab0af7d2 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
@@ -19,7 +19,7 @@
 import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors;
 import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;
 import org.elasticsearch.xpack.sql.expression.literal.Intervals;
-import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor;
+import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor;

 import java.io.ByteArrayOutputStream;
 import java.io.OutputStream;
@@ -47,7 +47,7 @@ public static List<NamedWriteableRegistry.Entry> getNamedWriteables() {
         entries.add(new NamedWriteableRegistry.Entry(Cursor.class, EmptyCursor.NAME, in -> Cursor.EMPTY));
         entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new));
         entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggregationCursor.NAME, CompositeAggregationCursor::new));
-        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CliFormatterCursor.NAME, CliFormatterCursor::new));
+        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, TextFormatterCursor.NAME, TextFormatterCursor::new));

         // plus all their dependencies
         entries.addAll(Processors.getNamedWriteables());
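The registry entry keeps the wire name "f" (TextFormatterCursor.NAME), so only the Java symbol changes and cursors already serialized under that name should keep resolving. A sketch of how consumers presumably build the lookup registry from these entries:

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.xpack.sql.session.Cursors;

class RegistrySketch {
    static NamedWriteableRegistry cursorRegistry() {
        // Includes the Cursor entries above plus their dependencies.
        return new NamedWriteableRegistry(Cursors.getNamedWriteables());
    }
}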
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java
similarity index 63%
rename from x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java
rename to x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java
index e1b551d2aeddb..fcab7eedca801 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java
@@ -6,28 +6,32 @@
 package org.elasticsearch.xpack.sql.action;

 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption;
 import org.elasticsearch.xpack.sql.proto.ColumnInfo;
 import org.elasticsearch.xpack.sql.proto.Mode;

 import java.util.Arrays;

+import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI;
 import static org.hamcrest.Matchers.arrayWithSize;

-public class CliFormatterTests extends ESTestCase {
-    private final SqlQueryResponse firstResponse = new SqlQueryResponse("", Mode.PLAIN,
+public class BasicFormatterTests extends ESTestCase {
+    private final FormatOption format = randomFrom(FormatOption.values());
+    private final SqlQueryResponse firstResponse = new SqlQueryResponse("", format == CLI ? Mode.CLI : Mode.PLAIN,
             Arrays.asList(
                     new ColumnInfo("", "foo", "string", 0),
                     new ColumnInfo("", "bar", "long", 15),
                     new ColumnInfo("", "15charwidename!", "double", 25),
                     new ColumnInfo("", "superduperwidename!!!", "double", 25),
-                    new ColumnInfo("", "baz", "keyword", 0)),
+                    new ColumnInfo("", "baz", "keyword", 0),
+                    new ColumnInfo("", "date", "datetime", 24)),
             Arrays.asList(
-                    Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"),
-                    Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat")));
-    private final CliFormatter formatter = new CliFormatter(firstResponse.columns(), firstResponse.rows());
+                    Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit", "1953-09-02T00:00:00.000Z"),
+                    Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat", "2000-03-15T21:34:37.443Z")));
+    private final BasicFormatter formatter = new BasicFormatter(firstResponse.columns(), firstResponse.rows(), format);

     /**
-     * Tests for {@link CliFormatter#formatWithHeader}, values
+     * Tests for {@link BasicFormatter#formatWithHeader}, values
      * of exactly the minimum column size, column names of exactly
      * the minimum column size, column headers longer than the
      * minimum column size, and values longer than the minimum
@@ -36,24 +40,30 @@ public class CliFormatterTests extends ESTestCase {
     public void testFormatWithHeader() {
         String[] result = formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).split("\n");
         assertThat(result, arrayWithSize(4));
-        assertEquals("      foo      |         bar          |15charwidename!|superduperwidename!!!|      baz      ", result[0]);
-        assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]);
-        assertEquals("15charwidedata!|1                     |6.888          |12                   |rabbit         ", result[2]);
-        assertEquals("dog            |1.7976931348623157E308|123124.888     |9912                 |goat           ", result[3]);
+        assertEquals("      foo      |         bar          |15charwidename!|superduperwidename!!!|      baz      |" +
+                "          date          ", result[0]);
+        assertEquals("---------------+----------------------+---------------+---------------------+---------------+" +
+                "------------------------", result[1]);
+        assertEquals("15charwidedata!|1                     |6.888          |12                   |rabbit         |" +
+                "1953-09-02T00:00:00.000Z", result[2]);
+        assertEquals("dog            |1.7976931348623157E308|123124.888     |9912                 |goat           |" +
+                "2000-03-15T21:34:37.443Z", result[3]);
     }

     /**
-     * Tests for {@link CliFormatter#formatWithoutHeader} and
+     * Tests for {@link BasicFormatter#formatWithoutHeader} and
      * truncation of long columns.
      */
     public void testFormatWithoutHeader() {
         String[] result = formatter.formatWithoutHeader(
                 Arrays.asList(
-                        Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"),
-                        Arrays.asList("dog", 2, 123124.888, 9912, "goat"))).split("\n");
+                        Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat", "1955-01-21T01:02:03.342Z"),
+                        Arrays.asList("dog", 2, 123124.888, 9912, "goat", "2231-12-31T23:59:59.999Z"))).split("\n");
         assertThat(result, arrayWithSize(2));
-        assertEquals("ohnotruncatedd~|4                     |1              |77                   |wombat         ", result[0]);
-        assertEquals("dog            |2                     |123124.888     |9912                 |goat           ", result[1]);
+        assertEquals("ohnotruncatedd~|4                     |1              |77                   |wombat         |" +
+                "1955-01-21T01:02:03.342Z", result[0]);
+        assertEquals("dog            |2                     |123124.888     |9912                 |goat           |" +
+                "2231-12-31T23:59:59.999Z", result[1]);
     }

     /**
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java
index f3e27d852b3fe..67b0f2badd883 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java
@@ -13,9 +13,9 @@
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.sql.SqlException;
 import org.elasticsearch.xpack.sql.TestUtils;
-import org.elasticsearch.xpack.sql.action.CliFormatter;
+import org.elasticsearch.xpack.sql.action.BasicFormatter;
 import org.elasticsearch.xpack.sql.action.SqlQueryResponse;
-import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor;
+import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor;
 import org.elasticsearch.xpack.sql.proto.ColumnInfo;
 import org.elasticsearch.xpack.sql.proto.Mode;
 import org.elasticsearch.xpack.sql.session.Cursor;
@@ -32,6 +32,8 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI;
+import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT;

 public class CursorTests extends ESTestCase {

@@ -79,12 +81,20 @@ static Cursor randomNonEmptyCursor() {
                 () -> {
                     SqlQueryResponse response = createRandomSqlResponse();
                     if (response.columns() != null && response.rows() != null) {
-                        return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(),
-                                new CliFormatter(response.columns(), response.rows()));
+                        return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(),
+                                new BasicFormatter(response.columns(), response.rows(), CLI));
+                    } else {
+                        return ScrollCursorTests.randomScrollCursor();
+                    }
+                },
+                () -> {
+                    SqlQueryResponse response = createRandomSqlResponse();
+                    if (response.columns() != null && response.rows() != null) {
+                        return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(),
+                                new BasicFormatter(response.columns(), response.rows(), TEXT));
                     } else {
                         return ScrollCursorTests.randomScrollCursor();
                     }
-                }
         );
         return cursorSupplier.get();
diff --git a/x-pack/qa/audit-tests/build.gradle b/x-pack/qa/audit-tests/build.gradle
index 126e3834bab4a..6afe5f01ae1e3 100644
--- a/x-pack/qa/audit-tests/build.gradle
+++ b/x-pack/qa/audit-tests/build.gradle
@@ -16,7 +16,7 @@ task copyXPackPluginProps(type: Copy) { // wth is this?
 project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)

 integTestCluster {
-    distribution 'zip'
+    distribution 'default'
     setting 'xpack.ilm.enabled', 'false'
     setting 'xpack.ml.enabled', 'false'
     setting 'xpack.monitoring.enabled', 'false'
diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle
index d154a1e248633..c6c4634e58b61 100644
--- a/x-pack/qa/rolling-upgrade/build.gradle
+++ b/x-pack/qa/rolling-upgrade/build.gradle
@@ -186,6 +186,10 @@ subprojects {
         setting 'xpack.watcher.encrypt_sensitive_data', 'true'
       }

+      if (version.onOrAfter('6.6.0')) {
+        setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s'
+      }
+
       // Old versions of the code contain an invalid assertion that trips
       // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing
       // the assertion, but this is impossible for released versions.
diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle
index e3bbf6e613f4e..664e5f715bbb1 100644
--- a/x-pack/qa/security-example-spi-extension/build.gradle
+++ b/x-pack/qa/security-example-spi-extension/build.gradle
@@ -32,7 +32,7 @@ integTestCluster {
     // This is important, so that all the modules are available too.
     // There are index templates that use token filters that are in analysis-module and
     // processors are being used that are in ingest-common module.
-    distribution = 'zip'
+    distribution = 'default'
     setupCommand 'setupDummyUser',
                  'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'