From a6abb28abf844fa46ca966dcbfcee99ee019f7b6 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Jan 2019 10:02:48 -0500 Subject: [PATCH 01/64] Fix InternalEngineTests#assertOpsOnPrimary (#37746) The assertion `assertOpsOnPrimary` does not store the seq_no and primary term of successful deletes to `lastOpSeqNo` and `lastOpTerm`. This leads to failures of subsequent CAS deletes or indexes with seq_no and term. Moreover, this assertion trips a translog assertion because it bumps the primary term of some operations but not the primary term of the engine. Relates #36467 Closes #37684 --- .../index/engine/InternalEngineTests.java | 32 +++++++++++++------ .../index/engine/EngineTestCase.java | 25 +++++++++++++-- 2 files changed, 45 insertions(+), 12 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 796d7eb0c60ec..edf1925fdd798 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1792,10 +1792,13 @@ public void testVersionOnPrimaryWithConcurrentRefresh() throws Exception { } }); refreshThread.start(); - latch.await(); - assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); - running.set(false); - refreshThread.join(); + try { + latch.await(); + assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); + } finally { + running.set(false); + refreshThread.join(); + } } private int assertOpsOnPrimary(List<Engine.Operation> ops, long currentOpVersion, boolean docDeleted, InternalEngine engine) @@ -1805,7 +1808,7 @@ private int assertOpsOnPrimary(List<Engine.Operation> ops, long currentOpVersion long lastOpVersion = currentOpVersion; long lastOpSeqNo = UNASSIGNED_SEQ_NO; long lastOpTerm = UNASSIGNED_PRIMARY_TERM; - final AtomicLong currentTerm = new AtomicLong(1); + PrimaryTermSupplier currentTerm = (PrimaryTermSupplier) engine.engineConfig.getPrimaryTermSupplier(); BiFunction<Long, Engine.Index, Engine.Index> indexWithVersion = (version, index) -> new Engine.Index(index.uid(), index.parsedDoc(), UNASSIGNED_SEQ_NO, currentTerm.get(), version, index.versionType(), index.origin(), index.startTime(), index.getAutoGeneratedIdTimestamp(), index.isRetry(), UNASSIGNED_SEQ_NO, 0); @@ -1818,6 +1821,12 @@ private int assertOpsOnPrimary(List<Engine.Operation> ops, long currentOpVersion TriFunction<Long, Long, Engine.Delete, Engine.Delete> delWithSeq = (seqNo, term, delete) -> new Engine.Delete(delete.type(), delete.id(), delete.uid(), UNASSIGNED_SEQ_NO, currentTerm.get(), delete.version(), delete.versionType(), delete.origin(), delete.startTime(), seqNo, term); + Function<Engine.Index, Engine.Index> indexWithCurrentTerm = index -> new Engine.Index(index.uid(), + index.parsedDoc(), UNASSIGNED_SEQ_NO, currentTerm.get(), index.version(), index.versionType(), index.origin(), + index.startTime(), index.getAutoGeneratedIdTimestamp(), index.isRetry(), index.getIfSeqNo(), index.getIfPrimaryTerm()); + Function<Engine.Delete, Engine.Delete> deleteWithCurrentTerm = delete -> new Engine.Delete(delete.type(), + delete.id(), delete.uid(), UNASSIGNED_SEQ_NO, currentTerm.get(), delete.version(), delete.versionType(), delete.origin(), + delete.startTime(), delete.getIfSeqNo(), delete.getIfPrimaryTerm()); for (Engine.Operation op : ops) { final boolean versionConflict = rarely(); final boolean versionedOp = versionConflict || randomBoolean(); final long conflictingVersion = docDeleted || randomBoolean() ? lastOpVersion + (randomBoolean() ? 1 : -1) : Versions.MATCH_DELETED; final long conflictingSeqNo = randomBoolean() || lastOpSeqNo == UNASSIGNED_SEQ_NO ? lastOpSeqNo + 5 : // Selecting a seq_no, term that is different from the previous op lastOpSeqNo; final long conflictingTerm = conflictingSeqNo == lastOpSeqNo || randomBoolean() ?
lastOpTerm + 1 : lastOpTerm; if (rarely()) { - currentTerm.incrementAndGet(); + currentTerm.set(currentTerm.get() + 1L); + engine.rollTranslogGeneration(); } final long correctVersion = docDeleted && randomBoolean() ? Versions.MATCH_DELETED : lastOpVersion; logger.info("performing [{}]{}{}", @@ -1860,7 +1870,7 @@ private int assertOpsOnPrimary(List ops, long currentOpVersion result = engine.index(indexWithVersion.apply(correctVersion, index)); } } else { - result = engine.index(index); + result = engine.index(indexWithCurrentTerm.apply(index)); } assertThat(result.isCreated(), equalTo(docDeleted)); assertThat(result.getVersion(), equalTo(Math.max(lastOpVersion + 1, 1))); @@ -1894,7 +1904,7 @@ private int assertOpsOnPrimary(List ops, long currentOpVersion } else if (versionedOp) { result = engine.delete(delWithVersion.apply(correctVersion, delete)); } else { - result = engine.delete(delete); + result = engine.delete(deleteWithCurrentTerm.apply(delete)); } assertThat(result.isFound(), equalTo(docDeleted == false)); assertThat(result.getVersion(), equalTo(Math.max(lastOpVersion + 1, 1))); @@ -1902,8 +1912,8 @@ private int assertOpsOnPrimary(List ops, long currentOpVersion assertThat(result.getFailure(), nullValue()); docDeleted = true; lastOpVersion = result.getVersion(); - lastOpSeqNo = UNASSIGNED_SEQ_NO; - lastOpTerm = UNASSIGNED_PRIMARY_TERM; + lastOpSeqNo = result.getSeqNo(); + lastOpTerm = result.getTerm(); opsPerformed++; } } @@ -1931,6 +1941,8 @@ private int assertOpsOnPrimary(List ops, long currentOpVersion engine.clearDeletedTombstones(); if (docDeleted) { lastOpVersion = Versions.NOT_FOUND; + lastOpSeqNo = UNASSIGNED_SEQ_NO; + lastOpTerm = UNASSIGNED_PRIMARY_TERM; } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 8b463f33b9081..1e3dbef92c30a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -149,7 +149,7 @@ public abstract class EngineTestCase extends ESTestCase { protected Path primaryTranslogDir; protected Path replicaTranslogDir; // A default primary term is used by engine instances created in this test. 
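// Editor's note (not part of the original patch): the hunk below replaces the raw
// AtomicLong with the mutable PrimaryTermSupplier defined at the end of this patch,
// so a test can bump the term with primaryTerm.set(...) and the engine observes the
// change through the same LongSupplier it was configured with.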
- protected AtomicLong primaryTerm = new AtomicLong(); + protected final PrimaryTermSupplier primaryTerm = new PrimaryTermSupplier(0L); protected static void assertVisibleCount(Engine engine, int numDocs) throws IOException { assertVisibleCount(engine, numDocs, true); @@ -682,7 +682,7 @@ public EngineConfig config( breakerService, globalCheckpointSupplier, retentionLeasesSupplier, - primaryTerm::get, + primaryTerm, tombstoneDocSupplier()); } @@ -1081,4 +1081,25 @@ public static Translog getTranslog(Engine engine) { InternalEngine internalEngine = (InternalEngine) engine; return internalEngine.getTranslog(); } + + public static final class PrimaryTermSupplier implements LongSupplier { + private final AtomicLong term; + + PrimaryTermSupplier(long initialTerm) { + this.term = new AtomicLong(initialTerm); + } + + public long get() { + return term.get(); + } + + public void set(long newTerm) { + this.term.set(newTerm); + } + + @Override + public long getAsLong() { + return get(); + } + } } From 265710e6582ec7943af56b47203a1c5dbaf19dc4 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Thu, 24 Jan 2019 17:05:01 +0200 Subject: [PATCH 02/64] Better msg on unmapped principal attribute (#37805) When we can't map the principal attribute from the configured SAML attribute in the realm settings, we can't complete the authentication. We return an error to the user indicating this and we present them with a list of attributes we did get from the SAML response to point out that the expected one was not part of that list. This list will never contain the NameIDs though as they are not part of the SAMLAttribute list. So we might have a NameID but just with a different format. --- .../elasticsearch/xpack/security/authc/saml/SamlRealm.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java index e93d1aa8f1491..be45ff76ec48f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -417,8 +417,9 @@ public void authenticate(AuthenticationToken authenticationToken, ActionListener private void buildUser(SamlAttributes attributes, ActionListener baseListener) { final String principal = resolveSingleValueAttribute(attributes, principalAttribute, PRINCIPAL_ATTRIBUTE.name(config)); if (Strings.isNullOrEmpty(principal)) { - baseListener.onResponse(AuthenticationResult.unsuccessful( - principalAttribute + " not found in " + attributes.attributes(), null)); + final String msg = + principalAttribute + " not found in saml attributes" + attributes.attributes() + " or NameID [" + attributes.name() + "]"; + baseListener.onResponse(AuthenticationResult.unsuccessful(msg, null)); return; } From bd02ca4b7b066de1e77cf9246c5a1674af1ea71a Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 24 Jan 2019 15:17:13 +0000 Subject: [PATCH 03/64] Mute NoMasterNodeIT testNoMasterActionsWriteMasterBlock Due to https://github.com/elastic/elasticsearch/issues/37823 --- .../src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 60c3bbee87a7a..5b76359576e92 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -192,6 +192,7 @@ void checkWriteAction(ActionRequestBuilder builder) { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37823") public void testNoMasterActionsWriteMasterBlock() throws Exception { Settings settings = Settings.builder() .put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false) From 4e08cca6bc1e63cb72ba5301f50decb4ce52a615 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 24 Jan 2019 17:26:42 +0200 Subject: [PATCH 04/64] Ground work to start up the docker image in the build (#37754) This change adds a docker compose configuration that's used with the `elasticsearch.test.fixtures` plugin to start up the image and check that the TCP ports are up. We can build on this to add other checks for cluster health, run REST tests, etc. We can add multiple containers and configurations to the compose file (e.g. test different env vars) and form clusters. --- .../testfixtures/TestFixtureExtension.java | 1 - .../testfixtures/TestFixturesPlugin.java | 92 +++++++++---------- distribution/docker/build.gradle | 17 ++++ distribution/docker/docker-compose.yml | 21 +++++ x-pack/test/smb-fixture/build.gradle | 2 + 5 files changed, 85 insertions(+), 48 deletions(-) create mode 100644 distribution/docker/docker-compose.yml diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java index edc0fd09f1677..b4ddcf0bed183 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixtureExtension.java @@ -44,5 +44,4 @@ public void useFixture(String path) { fixtures.add(fixtureProject); } - } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 32a50fb4b0750..35a7eacf1fde1 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -20,6 +20,7 @@ import com.avast.gradle.dockercompose.ComposeExtension; import com.avast.gradle.dockercompose.DockerComposePlugin; +import com.avast.gradle.dockercompose.tasks.ComposeUp; import org.elasticsearch.gradle.precommit.JarHellTask; import org.elasticsearch.gradle.precommit.TestingConventionsTasks; import org.elasticsearch.gradle.precommit.ThirdPartyAuditTask; @@ -54,7 +55,6 @@ public void apply(Project project) { // convenience boilerplate with build plugin // Can't reference tasks that are implemented in Groovy, use reflection instead disableTaskByType(tasks, getTaskClass("org.elasticsearch.gradle.precommit.LicenseHeadersTask")); - disableTaskByType(tasks, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask")); disableTaskByType(tasks, ThirdPartyAuditTask.class); disableTaskByType(tasks, JarHellTask.class); @@ -62,7 +62,6 @@ public void apply(Project project) { Task preProcessFixture = project.getTasks().create("preProcessFixture"); buildFixture.dependsOn(preProcessFixture); Task postProcessFixture = project.getTasks().create("postProcessFixture"); - buildFixture.dependsOn(postProcessFixture); if (dockerComposeSupported(project) == false) {
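// Editor's note (not part of the original patch): when docker-compose is not
// available, the fixture tasks below are disabled rather than failing the build,
// matching the warning later in this method that the tests "will be skipped".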
preProcessFixture.setEnabled(false); @@ -83,7 +82,7 @@ public void apply(Project project) { buildFixture.dependsOn(tasks.getByName("composeUp")); tasks.getByName("composeUp").mustRunAfter(preProcessFixture); - postProcessFixture.dependsOn("composeUp"); + postProcessFixture.dependsOn(buildFixture); configureServiceInfoForTask( postProcessFixture, @@ -91,38 +90,36 @@ public void apply(Project project) { (name, port) -> postProcessFixture.getExtensions() .getByType(ExtraPropertiesExtension.class).set(name, port) ); - } else { - extension.fixtures.all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); - if (dockerComposeSupported(project) == false) { - project.getLogger().warn( - "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + - "but none could be found so these will be skipped", project.getPath() + } + + extension.fixtures.all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); + if (dockerComposeSupported(project) == false) { + project.getLogger().warn( + "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + + "but none could be found so these will be skipped", project.getPath() + ); + disableTaskByType(tasks, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask")); + // conventions are not honored when the tasks are disabled + disableTaskByType(tasks, TestingConventionsTasks.class); + disableTaskByType(tasks, ComposeUp.class); + return; + } + tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> + extension.fixtures.all(fixtureProject -> { + fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(buildFixture -> + task.dependsOn(buildFixture) ); - tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> - task.setEnabled(false) + fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(composeDown -> + task.finalizedBy(composeDown) ); - // conventions are not honored when the tasks are disabled - tasks.withType(TestingConventionsTasks.class, task -> - task.setEnabled(false) + configureServiceInfoForTask( + task, + fixtureProject, + (name, port) -> setSystemProperty(task, name, port) ); - return; - } - tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> - extension.fixtures.all(fixtureProject -> { - fixtureProject.getTasks().matching(it->it.getName().equals("buildFixture")).all(buildFixture -> - task.dependsOn(buildFixture) - ); - fixtureProject.getTasks().matching(it->it.getName().equals("composeDown")).all(composeDown -> - task.finalizedBy(composeDown) - ); - configureServiceInfoForTask( - task, - fixtureProject, - (name, port) -> setSystemProperty(task, name, port) - ); - }) - ); - } + }) + ); + } private void configureServiceInfoForTask(Task task, Project fixtureProject, BiConsumer consumer) { @@ -131,23 +128,24 @@ private void configureServiceInfoForTask(Task task, Project fixtureProject, BiCo task.doFirst(theTask -> fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos() .forEach((service, infos) -> { - theTask.getLogger().info( - "Port maps for {}\nTCP:{}\nUDP:{}\nexposed to {}", - fixtureProject.getPath(), - infos.getTcpPorts(), - infos.getUdpPorts(), - theTask.getPath() - ); infos.getTcpPorts() - .forEach((container, host) -> consumer.accept( - "test.fixtures." + service + ".tcp." 
+ container, - host - )); + .forEach((container, host) -> { + String name = "test.fixtures." + service + ".tcp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept( + name, + host + ); + }); infos.getUdpPorts() - .forEach((container, host) -> consumer.accept( - "test.fixtures." + service + ".udp." + container, - host - )); + .forEach((container, host) -> { + String name = "test.fixtures." + service + ".udp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept( + name, + host + ); + }); }) ); } diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 219d81e1117ec..4d228e3c44fdc 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -4,6 +4,7 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.VersionProperties apply plugin: 'base' +apply plugin: 'elasticsearch.test.fixtures' configurations { dockerPlugins @@ -68,6 +69,22 @@ void addCopyDockerfileTask(final boolean oss) { } } + +preProcessFixture { + dependsOn taskName("copy", true, "DockerContext") + dependsOn taskName("copy", true, "Dockerfile") + dependsOn taskName("copy", false, "DockerContext") + dependsOn taskName("copy", false, "Dockerfile") +} + +postProcessFixture.doLast { + println "docker default distro is on port: ${ext."test.fixtures.elasticsearch-default.tcp.9200"}, " + + "oss is on: ${ext."test.fixtures.elasticsearch-oss.tcp.9200"}" +} + +// TODO: Add some actual tests, this will just check that the TCP port in the container is up +check.dependsOn postProcessFixture + void addBuildDockerImage(final boolean oss) { final Task buildDockerImageTask = task(taskName("build", oss, "DockerImage"), type: LoggedExec) { dependsOn taskName("copy", oss, "DockerContext") diff --git a/distribution/docker/docker-compose.yml b/distribution/docker/docker-compose.yml new file mode 100644 index 0000000000000..3f220aa9e91c0 --- /dev/null +++ b/distribution/docker/docker-compose.yml @@ -0,0 +1,21 @@ +# Only used for testing the docker images +version: '3' +services: + elasticsearch-default: + build: + context: ./build/docker + dockerfile: Dockerfile + environment: + - cluster.name=elasticsearch-default + - discovery.type=single-node + ports: + - "9200" + elasticsearch-oss: + build: + context: ./build/oss-docker + dockerfile: Dockerfile + environment: + - cluster.name=elasticsearch-oss + - discovery.type=single-node + ports: + - "9200" diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle index c361737e22c6d..846c38829870a 100644 --- a/x-pack/test/smb-fixture/build.gradle +++ b/x-pack/test/smb-fixture/build.gradle @@ -1,2 +1,4 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.test.fixtures' + +unitTest.enabled = false From ddee1926f977372b307f15adafda043fe5fd6fa7 Mon Sep 17 00:00:00 2001 From: niloct Date: Thu, 24 Jan 2019 13:24:42 -0200 Subject: [PATCH 05/64] Update update-by-query.asciidoc (#37555) Similar fix as #37370. --- docs/reference/docs/update-by-query.asciidoc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 096e4371be99a..a01bd30e4280a 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -444,8 +444,9 @@ POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel The task ID can be found using the <<tasks,tasks API>>.
-Cancellation should happen quickly but might take a few seconds. The task status -API above will continue to list the task until it is wakes to cancel itself. +Cancellation should happen quickly but might take a few seconds. The task status +API above will continue to list the update by query task until this task checks +that it has been cancelled and terminates itself. [float] From e7aa7e909a370a54613e86592d58219c7326faaf Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 24 Jan 2019 17:30:24 +0200 Subject: [PATCH 06/64] Switch testclusters to use nio (#37365) --- .../testclusters/ElasticsearchNode.java | 158 ++++++++---------- 1 file changed, 71 insertions(+), 87 deletions(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 6cb8f2bd2f220..56351f0c53462 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -27,13 +27,14 @@ import java.io.BufferedReader; import java.io.File; -import java.io.FileReader; import java.io.IOException; import java.io.InputStreamReader; +import java.io.UncheckedIOException; import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.Path; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; @@ -45,6 +46,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Objects.requireNonNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -56,8 +58,8 @@ public class ElasticsearchNode { private final String name; private final GradleServicesAdapter services; private final AtomicBoolean configurationFrozen = new AtomicBoolean(false); - private final File artifactsExtractDir; - private final File workingDir; + private final Path artifactsExtractDir; + private final Path workingDir; private static final int ES_DESTROY_TIMEOUT = 20; private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS; @@ -65,6 +67,15 @@ public class ElasticsearchNode { private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS; private final LinkedHashMap> waitConditions; + private final Path confPathRepo; + private final Path configFile; + private final Path confPathData; + private final Path confPathLogs; + private final Path transportPortFile; + private final Path httpPortsFile; + private final Path esStdoutFile; + private final Path esStderrFile; + private Distribution distribution; private String version; private File javaHome; @@ -75,11 +86,19 @@ public class ElasticsearchNode { this.path = path; this.name = name; this.services = services; - this.artifactsExtractDir = artifactsExtractDir; - this.workingDir = new File(workingDirBase, safeName(name)); + this.artifactsExtractDir = artifactsExtractDir.toPath(); + this.workingDir = workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); + confPathRepo = workingDir.resolve("repo"); + configFile = workingDir.resolve("config/elasticsearch.yml"); + confPathData = workingDir.resolve("data"); + confPathLogs = workingDir.resolve("logs"); + transportPortFile = confPathLogs.resolve("transport.ports"); + httpPortsFile = confPathLogs.resolve("http.ports"); + esStdoutFile = 
confPathLogs.resolve("es.stdout.log"); + esStderrFile = confPathLogs.resolve("es.stderr.log"); this.waitConditions = new LinkedHashMap<>(); - waitConditions.put("http ports file", node -> node.getHttpPortsFile().exists()); - waitConditions.put("transport ports file", node -> node.getTransportPortFile().exists()); + waitConditions.put("http ports file", node -> Files.exists(node.httpPortsFile)); + waitConditions.put("transport ports file", node -> Files.exists(node.transportPortFile)); waitForUri("cluster health yellow", "/_cluster/health?wait_for_nodes=>=1&wait_for_status=yellow"); } @@ -149,39 +168,39 @@ private void waitForUri(String description, String uri) { synchronized void start() { logger.info("Starting `{}`", this); - File distroArtifact = new File( - new File(artifactsExtractDir, distribution.getFileExtension()), - distribution.getFileName() + "-" + getVersion() - ); - if (distroArtifact.exists() == false) { + Path distroArtifact = artifactsExtractDir + .resolve(distribution.getFileExtension()) + .resolve(distribution.getFileName() + "-" + getVersion()); + + if (Files.exists(distroArtifact) == false) { throw new TestClustersException("Can not start " + this + ", missing: " + distroArtifact); } - if (distroArtifact.isDirectory() == false) { + if (Files.isDirectory(distroArtifact) == false) { throw new TestClustersException("Can not start " + this + ", is not a directory: " + distroArtifact); } services.sync(spec -> { - spec.from(new File(distroArtifact, "config")); - spec.into(getConfigFile().getParent()); + spec.from(distroArtifact.resolve("config").toFile()); + spec.into(configFile.getParent()); }); configure(); startElasticsearchProcess(distroArtifact); } - private void startElasticsearchProcess(File distroArtifact) { + private void startElasticsearchProcess(Path distroArtifact) { logger.info("Running `bin/elasticsearch` in `{}` for {}", workingDir, this); final ProcessBuilder processBuilder = new ProcessBuilder(); if (OperatingSystem.current().isWindows()) { processBuilder.command( "cmd", "/c", - new File(distroArtifact, "\\bin\\elasticsearch.bat").getAbsolutePath() + distroArtifact.resolve("\\bin\\elasticsearch.bat").toAbsolutePath().toString() ); } else { processBuilder.command( - new File(distroArtifact.getAbsolutePath(), "bin/elasticsearch").getAbsolutePath() + distroArtifact.resolve("bin/elasticsearch").toAbsolutePath().toString() ); } try { - processBuilder.directory(workingDir); + processBuilder.directory(workingDir.toFile()); Map environment = processBuilder.environment(); // Don't inherit anything from the environment for as that would lack reproductability environment.clear(); @@ -195,11 +214,11 @@ private void startElasticsearchProcess(File distroArtifact) { } else { logger.warn("{}: No javaHome configured, will rely on default java detection", this); } - environment.put("ES_PATH_CONF", getConfigFile().getParentFile().getAbsolutePath()); + environment.put("ES_PATH_CONF", configFile.getParent().toAbsolutePath().toString()); environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m"); // don't buffer all in memory, make sure we don't block on the default pipes - processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(getStdErrFile())); - processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(getStdoutFile())); + processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(esStderrFile.toFile())); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(esStdoutFile.toFile())); esProcess = processBuilder.start(); } catch (IOException e) { throw new 
TestClustersException("Failed to start ES process for " + this, e); @@ -226,8 +245,8 @@ synchronized void stop(boolean tailLogs) { requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped."); stopHandle(esProcess.toHandle()); if (tailLogs) { - logFileContents("Standard output of node", getStdoutFile()); - logFileContents("Standard error of node", getStdErrFile()); + logFileContents("Standard output of node", esStdoutFile); + logFileContents("Standard error of node", esStderrFile); } esProcess = null; } @@ -265,14 +284,14 @@ private void logProcessInfo(String prefix, ProcessHandle.Info info) { ); } - private void logFileContents(String description, File from) { + private void logFileContents(String description, Path from) { logger.error("{} `{}`", description, this); - try (BufferedReader reader = new BufferedReader(new FileReader(from))) { - reader.lines() + try(Stream lines = Files.lines(from, StandardCharsets.UTF_8)) { + lines .map(line -> " [" + name + "]" + line) .forEach(logger::error); } catch (IOException e) { - throw new TestClustersException("Error reading " + description, e); + throw new UncheckedIOException(e); } } @@ -289,48 +308,25 @@ private void waitForProcessToExit(ProcessHandle processHandle) { } } - private File getConfigFile() { - return new File(workingDir, "config/elasticsearch.yml"); - } - - private File getConfPathData() { - return new File(workingDir, "data"); - } - - private File getConfPathSharedData() { - return new File(workingDir, "sharedData"); - } - - private File getConfPathRepo() { - return new File(workingDir, "repo"); - } - - private File getConfPathLogs() { - return new File(workingDir, "logs"); - } - - private File getStdoutFile() { - return new File(getConfPathLogs(), "es.stdout.log"); - } - - private File getStdErrFile() { - return new File(getConfPathLogs(), "es.stderr.log"); - } + private void configure() { + try { + Files.createDirectories(configFile.getParent()); + Files.createDirectories(confPathRepo); + Files.createDirectories(confPathData); + Files.createDirectories(confPathLogs); + } catch (IOException e) { + throw new UncheckedIOException(e); + } - private void configure() { - getConfigFile().getParentFile().mkdirs(); - getConfPathRepo().mkdirs(); - getConfPathData().mkdirs(); - getConfPathSharedData().mkdirs(); - getConfPathLogs().mkdirs(); LinkedHashMap config = new LinkedHashMap<>(); + String nodeName = safeName(name); config.put("cluster.name",nodeName); config.put("node.name", nodeName); - config.put("path.repo", getConfPathRepo().getAbsolutePath()); - config.put("path.data", getConfPathData().getAbsolutePath()); - config.put("path.logs", getConfPathLogs().getAbsolutePath()); - config.put("path.shared_data", getConfPathSharedData().getAbsolutePath()); + config.put("path.repo", confPathRepo.toAbsolutePath().toString()); + config.put("path.data", confPathData.toAbsolutePath().toString()); + config.put("path.logs", confPathLogs.toAbsolutePath().toString()); + config.put("path.shared_data", workingDir.resolve("sharedData").toString()); config.put("node.attr.testattr", "test"); config.put("node.portsfile", "true"); config.put("http.port", "0"); @@ -348,16 +344,16 @@ private void configure() { } try { Files.write( - getConfigFile().toPath(), + configFile, config.entrySet().stream() .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) .getBytes(StandardCharsets.UTF_8) ); } catch (IOException e) { - throw new TestClustersException("Could not write config file: " + 
getConfigFile(), e); + throw new UncheckedIOException("Could not write config file: " + configFile, e); } - logger.info("Written config file:{} for {}", getConfigFile(), this); + logger.info("Written config file:{} for {}", configFile, this); } private void checkFrozen() { @@ -372,41 +368,29 @@ private static String safeName(String name) { .replaceAll("[^a-zA-Z0-9]+", "-"); } - private File getHttpPortsFile() { - return new File(getConfPathLogs(), "http.ports"); - } - - private File getTransportPortFile() { - return new File(getConfPathLogs(), "transport.ports"); - } - private List getTransportPortInternal() { - File transportPortFile = getTransportPortFile(); try { - return readPortsFile(getTransportPortFile()); + return readPortsFile(transportPortFile); } catch (IOException e) { - throw new TestClustersException( + throw new UncheckedIOException( "Failed to read transport ports file: " + transportPortFile + " for " + this, e ); } } private List getHttpPortInternal() { - File httpPortsFile = getHttpPortsFile(); try { - return readPortsFile(getHttpPortsFile()); + return readPortsFile(httpPortsFile); } catch (IOException e) { - throw new TestClustersException( + throw new UncheckedIOException( "Failed to read http ports file: " + httpPortsFile + " for " + this, e ); } } - private List readPortsFile(File file) throws IOException { - try (BufferedReader reader = new BufferedReader(new FileReader(file))) { - return reader.lines() - .map(String::trim) - .collect(Collectors.toList()); + private List readPortsFile(Path file) throws IOException { + try(Stream lines = Files.lines(file, StandardCharsets.UTF_8)) { + return lines.map(String::trim).collect(Collectors.toList()); } } From 37768b7eac24ad226dc0e4b0d34614d98b33fcd2 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 24 Jan 2019 17:30:50 +0200 Subject: [PATCH 07/64] Testing conventions now checks for tests in main (#37321) * Testing conventions now checks for tests in main This is the last outstanding feature of the old NamingConventionsTask, so time to remove it. 
* PR review --- buildSrc/build.gradle | 5 - .../gradle/plugin/PluginBuildPlugin.groovy | 5 - .../gradle/precommit/PrecommitTasks.groovy | 10 - .../precommit/NamingConventionsTask.java | 167 --------- .../precommit/TestingConventionsTasks.java | 47 ++- .../test/NamingConventionsCheck.java | 318 ------------------ .../precommit/NamingConventionsTaskIT.java | 58 ---- .../precommit/TestingConventionsTasksIT.java | 11 + buildSrc/src/testKit/jarHell/build.gradle | 1 - .../namingConventionsSelfTest/build.gradle | 24 -- .../namingConventionsSelfTest/settings.gradle | 0 .../NamingConventionsCheckBadClasses.java | 62 ---- .../test/NamingConventionsCheckInMainIT.java | 31 -- .../testKit/testingConventions/build.gradle | 4 + .../testingConventions/settings.gradle | 3 +- .../gradle/testkit/NamingConventionIT.java} | 9 +- .../testkit/NamingConventionTests.java} | 7 +- client/rest/build.gradle | 6 - client/sniffer/build.gradle | 6 - client/test/build.gradle | 2 - client/transport/build.gradle | 6 - .../tools/java-version-checker/build.gradle | 1 - distribution/tools/launchers/build.gradle | 5 - libs/secure-sm/build.gradle | 4 - qa/vagrant/build.gradle | 2 + test/framework/build.gradle | 6 - x-pack/qa/openldap-tests/build.gradle | 5 - x-pack/transport-client/build.gradle | 6 - 28 files changed, 66 insertions(+), 745 deletions(-) delete mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java delete mode 100644 buildSrc/src/main/minimumRuntime/org/elasticsearch/test/NamingConventionsCheck.java delete mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java delete mode 100644 buildSrc/src/testKit/namingConventionsSelfTest/build.gradle delete mode 100644 buildSrc/src/testKit/namingConventionsSelfTest/settings.gradle delete mode 100644 buildSrc/src/testKit/namingConventionsSelfTest/src/main/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java delete mode 100644 buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainIT.java rename buildSrc/src/testKit/{namingConventionsSelfTest/src/test/java/org/elasticsearch/test/WrongName.java => testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java} (81%) rename buildSrc/src/testKit/{namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainTests.java => testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java} (84%) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index b98cdd8788bd4..0acdc95c86bbf 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -235,11 +235,6 @@ if (project != rootProject) { exclude '**/ForbiddenPatternsTask.java' } - namingConventions { - testClass = 'org.elasticsearch.gradle.test.GradleUnitTestCase' - integTestClass = 'org.elasticsearch.gradle.test.GradleIntegrationTestCase' - } - testingConventions { naming.clear() naming { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 306ac4a05e87a..9f35c6b9e68ba 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -72,11 +72,6 @@ public class PluginBuildPlugin extends BuildPlugin { if (isModule == false || isXPackModule) { 
addNoticeGeneration(project) } - - project.namingConventions { - // Plugins declare integration tests as "Tests" instead of IT. - skipIntegTestInDisguise = true - } } project.testingConventions { naming.clear() diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 28c86a28f713c..b2a9663cf7ace 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -43,7 +43,6 @@ class PrecommitTasks { List precommitTasks = [ configureCheckstyle(project), configureForbiddenApisCli(project), - configureNamingConventions(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('filepermissions', FilePermissionsTask.class), @@ -230,15 +229,6 @@ class PrecommitTasks { return checkstyleTask } - private static Task configureNamingConventions(Project project) { - if (project.sourceSets.findByName("test")) { - Task namingConventionsTask = project.tasks.create('namingConventions', NamingConventionsTask) - namingConventionsTask.javaHome = project.compilerJavaHome - return namingConventionsTask - } - return null - } - private static Task configureLoggerUsage(Project project) { project.configurations.create('loggerUsagePlugin') project.dependencies.add('loggerUsagePlugin', diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java deleted file mode 100644 index b0e36982918fc..0000000000000 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java +++ /dev/null @@ -1,167 +0,0 @@ -package org.elasticsearch.gradle.precommit; - -import org.elasticsearch.gradle.LoggedExec; -import org.elasticsearch.test.NamingConventionsCheck; -import org.gradle.api.GradleException; -import org.gradle.api.file.FileCollection; -import org.gradle.api.plugins.JavaPluginConvention; -import org.gradle.api.tasks.Classpath; -import org.gradle.api.tasks.Input; -import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.SkipWhenEmpty; -import org.gradle.api.tasks.SourceSetContainer; -import org.gradle.api.tasks.TaskAction; - -import java.io.File; -import java.net.URISyntaxException; -import java.net.URL; - -/** - * Runs NamingConventionsCheck on a classpath/directory combo to verify that - * tests are named according to our conventions so they'll be picked up by - * gradle. Read the Javadoc for NamingConventionsCheck to learn more. - */ -@SuppressWarnings("unchecked") -public class NamingConventionsTask extends PrecommitTask { - - public NamingConventionsTask() { - setDescription("Tests that test classes aren't misnamed or misplaced"); - dependsOn(getJavaSourceSets().getByName(checkForTestsInMain ? 
"main" : "test").getClassesTaskName()); - } - - @TaskAction - public void runNamingConventions() { - LoggedExec.javaexec(getProject(), spec -> { - spec.classpath( - getNamingConventionsCheckClassFiles(), - getSourceSetClassPath() - ); - spec.executable(getJavaHome() + "/bin/java"); - spec.jvmArgs("-Djna.nosys=true"); - spec.setMain(NamingConventionsCheck.class.getName()); - spec.args("--test-class", getTestClass()); - if (isSkipIntegTestInDisguise()) { - spec.args("--skip-integ-tests-in-disguise"); - } else { - spec.args("--integ-test-class", getIntegTestClass()); - } - if (isCheckForTestsInMain()) { - spec.args("--main"); - spec.args("--"); - } else { - spec.args("--"); - } - spec.args(getExistingClassesDirs().getAsPath()); - }); - } - - @Input - public Object getJavaHome() { - return javaHome; - } - - public void setJavaHome(Object javaHome) { - this.javaHome = javaHome; - } - - @Classpath - public FileCollection getSourceSetClassPath() { - SourceSetContainer sourceSets = getJavaSourceSets(); - return getProject().files( - sourceSets.getByName("test").getCompileClasspath(), - sourceSets.getByName("test").getOutput(), - checkForTestsInMain ? sourceSets.getByName("main").getRuntimeClasspath() : getProject().files() - ); - } - - @InputFiles - public File getNamingConventionsCheckClassFiles() { - // This works because the class only depends on one class from junit that will be available from the - // tests compile classpath. It's the most straight forward way of telling Java where to find the main - // class. - URL location = NamingConventionsCheck.class.getProtectionDomain().getCodeSource().getLocation(); - if (location.getProtocol().equals("file") == false) { - throw new GradleException("Unexpected location for NamingConventionCheck class: "+ location); - } - try { - return new File(location.toURI().getPath()); - } catch (URISyntaxException e) { - throw new AssertionError(e); - } - } - - @InputFiles - @SkipWhenEmpty - public FileCollection getExistingClassesDirs() { - FileCollection classesDirs = getJavaSourceSets().getByName(checkForTestsInMain ? "main" : "test") - .getOutput().getClassesDirs(); - return classesDirs.filter(it -> it.exists()); - } - - @Input - public boolean isSkipIntegTestInDisguise() { - return skipIntegTestInDisguise; - } - - public void setSkipIntegTestInDisguise(boolean skipIntegTestInDisguise) { - this.skipIntegTestInDisguise = skipIntegTestInDisguise; - } - - @Input - public String getTestClass() { - return testClass; - } - - public void setTestClass(String testClass) { - this.testClass = testClass; - } - - @Input - public String getIntegTestClass() { - return integTestClass; - } - - public void setIntegTestClass(String integTestClass) { - this.integTestClass = integTestClass; - } - - @Input - public boolean isCheckForTestsInMain() { - return checkForTestsInMain; - } - - public void setCheckForTestsInMain(boolean checkForTestsInMain) { - this.checkForTestsInMain = checkForTestsInMain; - } - - private SourceSetContainer getJavaSourceSets() { - return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); - } - - /** - * The java home to run the check with - */ - private Object javaHome; // Make it an Object to allow for Groovy GString - - /** - * Should we skip the integ tests in disguise tests? Defaults to true because only core names its - * integ tests correctly. - */ - private boolean skipIntegTestInDisguise = false; - - /** - * Superclass for all tests. 
- */ - private String testClass = "org.apache.lucene.util.LuceneTestCase"; - - /** - * Superclass for all integration tests. - */ - private String integTestClass = "org.elasticsearch.test.ESIntegTestCase"; - - /** - * Should the test also check the main classpath for test classes instead of - * doing the usual checks to the test classpath. - */ - private boolean checkForTestsInMain = false; -} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java index 0f207ad3fe1af..04e1343f4ac92 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java @@ -27,6 +27,8 @@ import org.gradle.api.file.FileTree; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.TaskAction; import org.gradle.api.tasks.testing.Test; import org.gradle.api.tasks.util.PatternFilterable; @@ -122,6 +124,23 @@ public void naming(Closure action) { naming.configure(action); } + @Input + public Set getMainClassNamedLikeTests() { + SourceSetContainer javaSourceSets = Boilerplate.getJavaSourceSets(getProject()); + if (javaSourceSets.findByName(SourceSet.MAIN_SOURCE_SET_NAME) == null) { + // some test projects don't have a main source set + return Collections.emptySet(); + } + return javaSourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME) + .getOutput().getClassesDirs().getAsFileTree() + .getFiles().stream() + .filter(file -> file.getName().endsWith(".class")) + .map(File::getName) + .map(name -> name.substring(0, name.length() - 6)) + .filter(this::implementsNamingConvention) + .collect(Collectors.toSet()); + } + @TaskAction public void doCheck() throws IOException { final String problems; @@ -235,10 +254,12 @@ public void doCheck() throws IOException { ); }).sorted() .collect(Collectors.joining("\n")) - ) + ), // TODO: check that the testing tasks are included in the right task based on the name ( from the rule ) - // TODO: check to make sure that the main source set doesn't have classes that match - // the naming convention (just the names, don't load classes) + checkNoneExists( + "Classes matching the test naming convention should be in test not main", + getMainClassNamedLikeTests() + ) ); } @@ -296,6 +317,18 @@ private String checkNoneExists(String message, Stream> stream } } + private String checkNoneExists(String message, Set candidates) { + String problem = candidates.stream() + .map(each -> " * " + each) + .sorted() + .collect(Collectors.joining("\n")); + if (problem.isEmpty() == false) { + return message + ":\n" + problem; + } else { + return ""; + } + } + private String checkAtLeastOneExists(String message, Stream> stream) { if (stream.findAny().isPresent()) { return ""; @@ -337,10 +370,14 @@ private boolean seemsLikeATest(Class clazz) { } private boolean implementsNamingConvention(Class clazz) { + return implementsNamingConvention(clazz.getName()); + } + + private boolean implementsNamingConvention(String className) { if (naming.stream() .map(TestingConventionRule::getSuffix) - .anyMatch(suffix -> clazz.getName().endsWith(suffix))) { - getLogger().debug("{} is a test because it matches the naming convention", clazz.getName()); + .anyMatch(suffix -> className.endsWith(suffix))) { + getLogger().debug("{} is a test because it 
matches the naming convention", className); return true; } return false; diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/test/NamingConventionsCheck.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/test/NamingConventionsCheck.java deleted file mode 100644 index 17d885e21bcc2..0000000000000 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/test/NamingConventionsCheck.java +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Modifier; -import java.nio.file.FileVisitResult; -import java.nio.file.FileVisitor; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.HashSet; -import java.util.Objects; -import java.util.Set; -import java.util.regex.Pattern; - -/** - * Checks that all tests in a directory are named according to our naming conventions. This is important because tests that do not follow - * our conventions aren't run by gradle. This was once a glorious unit test but now that Elasticsearch is a multi-module project it must be - * a class with a main method so gradle can call it for each project. This has the advantage of allowing gradle to calculate when it is - * {@code UP-TO-DATE} so it can be skipped if the compiled classes haven't changed. This is useful on large modules for which checking all - * the modules can be slow. 
- */ -public class NamingConventionsCheck { - public static void main(String[] args) throws IOException { - Class testClass = null; - Class integTestClass = null; - String rootPathList = null; - boolean skipIntegTestsInDisguise = false; - boolean checkMainClasses = false; - for (int i = 0; i < args.length; i++) { - String arg = args[i]; - switch (arg) { - case "--test-class": - testClass = loadClassWithoutInitializing(args[++i]); - break; - case "--integ-test-class": - integTestClass = loadClassWithoutInitializing(args[++i]); - break; - case "--skip-integ-tests-in-disguise": - skipIntegTestsInDisguise = true; - break; - case "--main": - checkMainClasses = true; - break; - case "--": - rootPathList = args[++i]; - break; - default: - fail("unsupported argument '" + arg + "'"); - } - } - if (rootPathList == null) { - fail("No paths provided"); - return; - } - - NamingConventionsCheck check = new NamingConventionsCheck(testClass, integTestClass); - for (String rootDir : rootPathList.split(Pattern.quote(File.pathSeparator))) { - Path rootPath = Paths.get(rootDir); - if (checkMainClasses) { - check.checkMain(rootPath); - } else { - check.checkTests(rootPath, skipIntegTestsInDisguise); - } - } - - // Now we should have no violations - int exitCode = 0 ; - exitCode += countAndPrintViolations( - "Not all subclasses of " + check.testClass.getSimpleName() - + " match the naming convention. Concrete classes must end with [Tests]", - check.missingSuffix) ; - exitCode += countAndPrintViolations( - "Classes ending with [Tests] are abstract or interfaces", - check.notRunnable - ); - exitCode += countAndPrintViolations( - "Found inner classes that are tests, which are excluded from the test runner", - check.innerClasses - ); - exitCode += countAndPrintViolations( - "Pure Unit-Test found must subclass [" + check.testClass.getSimpleName() + "]", - check.pureUnitTest - ); - exitCode += countAndPrintViolations( - "Classes ending with [Tests] must subclass [" + check.testClass.getSimpleName() + "]", - check.notImplementing - ); - exitCode += countAndPrintViolations( - "Classes ending with [Tests] or [IT] or extending [" + - check.testClass.getSimpleName() + "] must be in src/test/java", - check.testsInMain - ); - if (skipIntegTestsInDisguise == false) { - exitCode += countAndPrintViolations("Subclasses of " + check.integTestClass.getSimpleName() + - " should end with IT as they are integration tests", - check.integTestsInDisguise - ); - } - System.exit(exitCode); - } - - private final Set> notImplementing = new HashSet<>(); - private final Set> pureUnitTest = new HashSet<>(); - private final Set> missingSuffix = new HashSet<>(); - private final Set> integTestsInDisguise = new HashSet<>(); - private final Set> notRunnable = new HashSet<>(); - private final Set> innerClasses = new HashSet<>(); - private final Set> testsInMain = new HashSet<>(); - - private final Class testClass; - private final Class integTestClass; - - public NamingConventionsCheck(Class testClass, Class integTestClass) { - this.testClass = Objects.requireNonNull(testClass, "--test-class is required"); - this.integTestClass = integTestClass; - } - - public void checkTests(Path rootPath, boolean skipTestsInDisguised) throws IOException { - Files.walkFileTree(rootPath, new TestClassVisitor() { - @Override - protected void visitTestClass(Class clazz) { - if (skipTestsInDisguised == false && - integTestClass.isAssignableFrom(clazz) && - clazz != integTestClass) { - integTestsInDisguise.add(clazz); - } - if (Modifier.isAbstract(clazz.getModifiers()) 
|| Modifier.isInterface(clazz.getModifiers())) { - notRunnable.add(clazz); - } else if (isTestCase(clazz) == false) { - notImplementing.add(clazz); - } else if (Modifier.isStatic(clazz.getModifiers())) { - innerClasses.add(clazz); - } - } - - @Override - protected void visitIntegrationTestClass(Class clazz) { - if (isTestCase(clazz) == false) { - notImplementing.add(clazz); - } - } - - @Override - protected void visitOtherClass(Class clazz) { - if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) { - return; - } - if (isTestCase(clazz)) { - missingSuffix.add(clazz); - } else if (junit.framework.Test.class.isAssignableFrom(clazz)) { - pureUnitTest.add(clazz); - } - } - }); - } - - public void checkMain(Path rootPath) throws IOException { - Files.walkFileTree(rootPath, new TestClassVisitor() { - @Override - protected void visitTestClass(Class clazz) { - testsInMain.add(clazz); - } - - @Override - protected void visitIntegrationTestClass(Class clazz) { - testsInMain.add(clazz); - } - - @Override - protected void visitOtherClass(Class clazz) { - if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) { - return; - } - if (isTestCase(clazz)) { - testsInMain.add(clazz); - } - } - }); - - } - - private static int countAndPrintViolations(String message, Set> set) { - if (false == set.isEmpty()) { - System.err.println(message + ":"); - for (Class bad : set) { - System.err.println(" * " + bad.getName()); - } - return 1; - } - return 0; - } - - /** - * Fail the process if we didn't detect a particular violation. Named to look like a junit assertion even though it isn't because it is - * similar enough. - */ - private static void assertViolation(String className, Set> set) { - className = className.startsWith("org") ? className : "org.elasticsearch.test.NamingConventionsCheckBadClasses$" + className; - if (false == set.remove(loadClassWithoutInitializing(className))) { - System.err.println("Error in NamingConventionsCheck! Expected [" + className + "] to be a violation but wasn't."); - System.exit(1); - } - } - - /** - * Fail the process with the provided message. - */ - private static void fail(String reason) { - System.err.println(reason); - System.exit(1); - } - - static Class loadClassWithoutInitializing(String name) { - try { - return Class.forName(name, - // Don't initialize the class to save time. Not needed for this test and this doesn't share a VM with any other tests. - false, - // Use our classloader rather than the bootstrap class loader. - NamingConventionsCheck.class.getClassLoader()); - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); - } - } - - abstract class TestClassVisitor implements FileVisitor { - /** - * The package name of the directory we are currently visiting. Kept as a string rather than something fancy because we load - * just about every class and doing so requires building a string out of it anyway. At least this way we don't need to build the - * first part of the string over and over and over again. - */ - private String packageName; - - /** - * Visit classes named like a test. - */ - protected abstract void visitTestClass(Class clazz); - - /** - * Visit classes named like an integration test. - */ - protected abstract void visitIntegrationTestClass(Class clazz); - - /** - * Visit classes not named like a test at all. 
- */ - protected abstract void visitOtherClass(Class clazz); - @Override - public final FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { - // First we visit the root directory - if (packageName == null) { - // And it package is empty string regardless of the directory name - packageName = ""; - } else { - packageName += dir.getFileName() + "."; - } - return FileVisitResult.CONTINUE; - } - - @Override - public final FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { - // Go up one package by jumping back to the second to last '.' - packageName = packageName.substring(0, 1 + packageName.lastIndexOf('.', packageName.length() - 2)); - return FileVisitResult.CONTINUE; - } - - @Override - public final FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - String filename = file.getFileName().toString(); - if (filename.endsWith(".class")) { - String className = filename.substring(0, filename.length() - ".class".length()); - Class clazz = loadClassWithoutInitializing(packageName + className); - if (clazz.getName().endsWith("Tests")) { - visitTestClass(clazz); - } else if (clazz.getName().endsWith("IT")) { - visitIntegrationTestClass(clazz); - } else { - visitOtherClass(clazz); - } - } - return FileVisitResult.CONTINUE; - } - - /** - * Is this class a test case? - */ - protected boolean isTestCase(Class clazz) { - return testClass.isAssignableFrom(clazz); - } - - @Override - public final FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException { - throw exc; - } - - } - -} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java deleted file mode 100644 index 745c63cd4dc89..0000000000000 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java +++ /dev/null @@ -1,58 +0,0 @@ -package org.elasticsearch.gradle.precommit; - -import org.elasticsearch.gradle.test.GradleIntegrationTestCase; -import org.gradle.testkit.runner.BuildResult; - -import java.util.Arrays; -import java.util.HashSet; - -public class NamingConventionsTaskIT extends GradleIntegrationTestCase { - - public void testNameCheckFailsAsItShould() { - BuildResult result = getGradleRunner("namingConventionsSelfTest") - .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=false") - .buildAndFail(); - - assertTaskFailed(result, ":namingConventions"); - assertOutputContains( - result.getOutput(), - // TODO: java9 Set.of - new HashSet<>( - Arrays.asList( - "Not all subclasses of UnitTestCase match the naming convention. 
Concrete classes must end with [Tests]:", - "* org.elasticsearch.test.WrongName", - "Found inner classes that are tests, which are excluded from the test runner:", - "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests", - "Classes ending with [Tests] must subclass [UnitTestCase]:", - "* org.elasticsearch.test.NamingConventionsCheckInMainTests", - "* org.elasticsearch.test.NamingConventionsCheckInMainIT" - ) - ) - ); - } - - public void testNameCheckFailsAsItShouldWithMain() { - BuildResult result = getGradleRunner("namingConventionsSelfTest") - .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=true") - .buildAndFail(); - - assertTaskFailed(result, ":namingConventions"); - assertOutputContains( - result.getOutput(), - // TODO: java9 Set.of - new HashSet<>( - Arrays.asList( - "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName" - ) - ) - ); - } - -} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java index dbe06287782f7..39ab8a6734c58 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java @@ -105,4 +105,15 @@ public void testValidSetupWithBaseClass() { assertTaskSuccessful(result, ":valid_setup_with_base:testingConventions"); } + public void testTestsInMain() { + GradleRunner runner = getGradleRunner("testingConventions") + .withArguments("clean", ":tests_in_main:testingConventions", "-i", "-s"); + BuildResult result = runner.buildAndFail(); + assertOutputContains(result.getOutput(), + "Classes matching the test naming convention should be in test not main:", + " * NamingConventionIT", + " * NamingConventionTests" + ); + } + } diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle index f96e8ac37e0cf..cd423c9f99f81 100644 --- a/buildSrc/src/testKit/jarHell/build.gradle +++ b/buildSrc/src/testKit/jarHell/build.gradle @@ -8,7 +8,6 @@ dependenciesInfo.enabled = false forbiddenApisMain.enabled = false forbiddenApisTest.enabled = false thirdPartyAudit.enabled = false -namingConventions.enabled = false ext.licenseFile = file("$buildDir/dummy/license") ext.noticeFile = file("$buildDir/dummy/notice") diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle b/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle deleted file mode 100644 index b1c56ddc8048d..0000000000000 --- a/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle +++ /dev/null @@ -1,24 +0,0 @@ -plugins { - id 'java' - id 'elasticsearch.build' -} - -dependencyLicenses.enabled = false -dependenciesInfo.enabled = false -forbiddenApisMain.enabled = false -forbiddenApisTest.enabled = false -jarHell.enabled = false -thirdPartyAudit.enabled = false - 
-ext.licenseFile = file("$buildDir/dummy/license") -ext.noticeFile = file("$buildDir/dummy/notice") - -dependencies { - compile "junit:junit:4.12" -} - -namingConventions { - checkForTestsInMain = project.property("checkForTestsInMain") == "true" - testClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$UnitTestCase' - integTestClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$IntegTestCase' -} diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/settings.gradle b/buildSrc/src/testKit/namingConventionsSelfTest/settings.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/src/main/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java b/buildSrc/src/testKit/namingConventionsSelfTest/src/main/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java deleted file mode 100644 index 4fc88b3afc530..0000000000000 --- a/buildSrc/src/testKit/namingConventionsSelfTest/src/main/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test; - -import junit.framework.TestCase; - -/** - * These inner classes all fail the NamingConventionsCheck. They have to live in the tests or else they won't be scanned. - */ -public class NamingConventionsCheckBadClasses { - public static final class NotImplementingTests { - } - - public static final class WrongName extends UnitTestCase { - /* - * Dummy test so the tests pass. We do this *and* skip the tests so anyone who jumps back to a branch without these tests can still - * compile without a failure. That is because clean doesn't actually clean these.... 
- */ - public void testDummy() {} - } - - public abstract static class DummyAbstractTests extends UnitTestCase { - } - - public interface DummyInterfaceTests { - } - - public static final class InnerTests extends UnitTestCase { - public void testDummy() {} - } - - public static final class WrongNameTheSecond extends UnitTestCase { - public void testDummy() {} - } - - public static final class PlainUnit extends TestCase { - public void testDummy() {} - } - - public abstract static class UnitTestCase extends TestCase { - } - - public abstract static class IntegTestCase extends UnitTestCase { - } -} diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainIT.java b/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainIT.java deleted file mode 100644 index 438f80154191b..0000000000000 --- a/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainIT.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test; - -/** - * This class should fail the naming conventions self test. 
- */ -public class NamingConventionsCheckInMainIT { - - public static class InternalInvalidTests extends NamingConventionsCheckBadClasses.UnitTestCase { - - } - -} diff --git a/buildSrc/src/testKit/testingConventions/build.gradle b/buildSrc/src/testKit/testingConventions/build.gradle index d1a21a1ead0e7..0052245099112 100644 --- a/buildSrc/src/testKit/testingConventions/build.gradle +++ b/buildSrc/src/testKit/testingConventions/build.gradle @@ -67,6 +67,10 @@ project(':valid_setup_no_base') { } } +project(':tests_in_main') { + +} + project (':valid_setup_with_base') { task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { include "**/*IT.class" diff --git a/buildSrc/src/testKit/testingConventions/settings.gradle b/buildSrc/src/testKit/testingConventions/settings.gradle index 2baec09d27c8e..35d73cfd204d1 100644 --- a/buildSrc/src/testKit/testingConventions/settings.gradle +++ b/buildSrc/src/testKit/testingConventions/settings.gradle @@ -4,4 +4,5 @@ include 'empty_test_task' include 'all_classes_in_tasks' include 'not_implementing_base' include 'valid_setup_no_base' -include 'valid_setup_with_base' \ No newline at end of file +include 'valid_setup_with_base' +include 'tests_in_main' \ No newline at end of file diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/WrongName.java b/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java similarity index 81% rename from buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/WrongName.java rename to buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java index 64d6a237f8f4d..48a4f7adfd99e 100644 --- a/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/WrongName.java +++ b/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionIT.java @@ -16,11 +16,8 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.gradle.testkit; -package org.elasticsearch.test; +public class NamingConventionIT { -/** - * This class should fail the naming conventions self test. - */ -public class WrongName extends NamingConventionsCheckBadClasses.UnitTestCase { -} +} \ No newline at end of file diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainTests.java b/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java similarity index 84% rename from buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainTests.java rename to buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java index 27c0b41eb3f6a..6afb89ddf56b0 100644 --- a/buildSrc/src/testKit/namingConventionsSelfTest/src/test/java/org/elasticsearch/test/NamingConventionsCheckInMainTests.java +++ b/buildSrc/src/testKit/testingConventions/tests_in_main/src/main/java/org/elasticsearch/gradle/testkit/NamingConventionTests.java @@ -16,11 +16,8 @@ * specific language governing permissions and limitations * under the License. 
*/ +package org.elasticsearch.gradle.testkit; -package org.elasticsearch.test; +public class NamingConventionTests { -/** - * This class should fail the naming conventions self test. - */ -public class NamingConventionsCheckInMainTests { } diff --git a/client/rest/build.gradle b/client/rest/build.gradle index a6d8eb8467dab..6b22b7b909909 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -71,12 +71,6 @@ forbiddenApisTest { // TODO: Not anymore. Now in :libs:core jarHell.enabled=false -namingConventions { - testClass = 'org.elasticsearch.client.RestClientTestCase' - //we don't have integration tests - skipIntegTestInDisguise = true -} - testingConventions { naming.clear() naming { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 9f2dd73c5c8e7..382a3f3c9d121 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -72,12 +72,6 @@ dependencyLicenses { // TODO: Not anymore. Now in :libs:core jarHell.enabled=false -namingConventions { - testClass = 'org.elasticsearch.client.RestClientTestCase' - //we don't have integration tests - skipIntegTestInDisguise = true -} - testingConventions { naming.clear() naming { diff --git a/client/test/build.gradle b/client/test/build.gradle index e8963b76c5e4b..f184cfbb73c3d 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -51,8 +51,6 @@ jarHell.enabled=false dependencyLicenses.enabled = false dependenciesInfo.enabled = false -namingConventions.enabled = false - //we aren't releasing this jar thirdPartyAudit.enabled = false unitTest.enabled = false diff --git a/client/transport/build.gradle b/client/transport/build.gradle index 7516e5eb89cce..e0292cd557438 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -47,12 +47,6 @@ forbiddenApisTest { replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } -namingConventions { - testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest' - //we don't have integration tests - skipIntegTestInDisguise = true -} - testingConventions { naming.clear() naming { diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 48014a42a4d99..03ac32d20b7d6 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -8,7 +8,6 @@ forbiddenApisMain { } unitTest.enabled = false -namingConventions.enabled = false javadoc.enabled = false loggerUsageCheck.enabled = false jarHell.enabled = false diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index 4c7d171663a0f..b7b12170f66e1 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -33,11 +33,6 @@ tasks.withType(CheckForbiddenApis) { replaceSignatureFiles 'jdk-signatures' } -namingConventions { - testClass = 'org.elasticsearch.tools.launchers.LaunchersTestCase' - skipIntegTestInDisguise = true -} - testingConventions { naming.clear() naming { diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 97b6652fc12a1..d9a6e30b83aab 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -63,10 +63,6 @@ if (isEclipse) { // JAR hell is part of core which we do not want to add as a dependency jarHell.enabled = false -namingConventions { - testClass = 'junit.framework.TestCase' -} - testingConventions { naming.clear() naming { diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 
d1ded46d2b684..bd5f3e7a2ac1c 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -72,6 +72,8 @@ forbiddenApisMain { // we don't have additional tests for the tests themselves tasks.unitTest.enabled = false +// Tests are destructive and meant to run in a VM; they don't adhere to general conventions +testingConventions.enabled = false // this project doesn't get published tasks.dependencyLicenses.enabled = false diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 26a36852d378d..fbc87988837cc 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -64,12 +64,6 @@ thirdPartyAudit.ignoreMissingClasses ( 'org.jmock.core.Constraint' ) -task namingConventionsMain(type: org.elasticsearch.gradle.precommit.NamingConventionsTask) { - checkForTestsInMain = true - javaHome = project.compilerJavaHome -} -precommit.dependsOn namingConventionsMain - unitTest { systemProperty 'tests.gradle_index_compat_versions', bwcVersions.indexCompatible.join(',') systemProperty 'tests.gradle_wire_compat_versions', bwcVersions.wireCompatible.join(',') diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 8c87f7e616400..6e2c91dff75ab 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -29,8 +29,3 @@ if (project.rootProject.vagrantSupported) { unitTest.enabled = false testingConventions.enabled = false } - -namingConventions { - // integ tests use Tests instead of IT - skipIntegTestInDisguise = true -} diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle index 87a626be65d42..90e3f96418f08 100644 --- a/x-pack/transport-client/build.gradle +++ b/x-pack/transport-client/build.gradle @@ -23,12 +23,6 @@ forbiddenApisTest { replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' } -namingConventions { - testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest' - //we don't have integration tests - skipIntegTestInDisguise = true -} - testingConventions { naming.clear() naming { From f6b6f927ec83b0067d330d606dcf869db97d7029 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 24 Jan 2019 17:33:08 +0200 Subject: [PATCH 08/64] Fail randomized testing tasks if no tests ran (#37764) Reverts #36259 in part to make randomized tests fail if no tests ran. This is useful when filtering tests, as it's easy to make a typo and think the tests ran through successfully.
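For context, `ifNoTests` is an existing knob on `RandomizedTestingTask`; the one-line change below simply sets it to 'fail' for every such task. A minimal sketch of what that policy amounts to, assuming the task can report how many tests it executed (the class and method names here are illustrative, not the plugin's actual internals):

    import org.gradle.api.GradleException;

    /**
     * Sketch of an "ifNoTests" policy check run after the test JVMs finish.
     * The value set is assumed to be one of "ignore", "warn" or "fail".
     */
    class NoTestsPolicy {
        private final String ifNoTests;

        NoTestsPolicy(String ifNoTests) {
            this.ifNoTests = ifNoTests;
        }

        void afterExecution(String taskPath, long executedTests) {
            // Zero executed tests usually means a mistyped test filter.
            if (executedTests == 0 && "fail".equals(ifNoTests)) {
                throw new GradleException("Task " + taskPath + " did not run any tests");
            }
        }
    }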
--- .../src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 1 + 1 file changed, 1 insertion(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 54a14138505cd..893f767e26e3f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -904,6 +904,7 @@ class BuildPlugin implements Plugin { project.tasks.withType(RandomizedTestingTask) {task -> jvm "${project.runtimeJavaHome}/bin/java" parallelism System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel) + ifNoTests 'fail' onNonEmptyWorkDirectory 'wipe' leaveTemporary true project.sourceSets.matching { it.name == "test" }.all { test -> From 74b6f308e9b157db947b961d536e6d0bd3b18dd5 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Thu, 24 Jan 2019 18:40:20 +0200 Subject: [PATCH 09/64] SQL: Fix issue with complex expression as args of PERCENTILE/_RANK (#37102) When the arguments of PERCENTILE and PERCENTILE_RANK can be folded, the `ConstantFolding` rule kicks in and calls the `replaceChildren()` method on `InnerAggregate` which is created from the aggregation rules of the `Optimizer`. `InnerAggregate`, in turn, cannot implement the method as the logic of creating a new `InnerAggregate` instance from a list of `Expression`s resides in the Optimizer. So, instead, `ConstantFolding` should be applied before any of the aggregation-related rules. Fixes: #37099 --- .../sql/qa/src/main/resources/agg.csv-spec | 4 ++-- .../xpack/sql/optimizer/Optimizer.java | 2 +- .../xpack/sql/planner/QueryFolderTests.java | 21 +++++++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec index 7051353d78dda..1cf3acdcfa4a6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec @@ -3,7 +3,7 @@ // singlePercentileWithoutComma -SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_emp GROUP BY gender; +SELECT gender, PERCENTILE(emp_no, 90 + 7) p1 FROM test_emp GROUP BY gender; gender:s | p1:d null |10019.0 @@ -48,7 +48,7 @@ M |10084.349 |10093.502 ; percentileRank -SELECT gender, PERCENTILE_RANK(emp_no, 10025) rank FROM test_emp GROUP BY gender; +SELECT gender, PERCENTILE_RANK(emp_no, 10000 + 25) rank FROM test_emp GROUP BY gender; gender:s | rank:d null |100.0 diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index ade69463d1345..1865fd4eea126 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -159,7 +159,7 @@ protected Iterable.Batch> batches() { Batch label = new Batch("Set as Optimized", Limiter.ONCE, new SetAsOptimized()); - return Arrays.asList(aggregate, operators, local, label); + return Arrays.asList(operators, aggregate, local, label); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java index 44f50b53b5aa3..17b1eedf06d93 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.optimizer.Optimizer; import org.elasticsearch.xpack.sql.parser.SqlParser; import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; @@ -316,4 +317,24 @@ public void testConcatIsNotFoldedForNull() { assertEquals(1, ee.output().size()); assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); } + + public void testFoldingOfPercentileSecondArgument() { + PhysicalPlan p = plan("SELECT PERCENTILE(int, 1 + 2) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec ee = (EsQueryExec) p; + assertEquals(1, ee.output().size()); + assertEquals(AggregateFunctionAttribute.class, ee.output().get(0).getClass()); + AggregateFunctionAttribute afa = (AggregateFunctionAttribute) ee.output().get(0); + assertThat(afa.propertyPath(), endsWith("[3.0]")); + } + + public void testFoldingOfPercentileRankSecondArgument() { + PhysicalPlan p = plan("SELECT PERCENTILE_RANK(int, 1 + 2) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec ee = (EsQueryExec) p; + assertEquals(1, ee.output().size()); + assertEquals(AggregateFunctionAttribute.class, ee.output().get(0).getClass()); + AggregateFunctionAttribute afa = (AggregateFunctionAttribute) ee.output().get(0); + assertThat(afa.propertyPath(), endsWith("[3.0]")); + } } From 0e36adc35fafbb0a3fb5bfa459f9cb7fac520785 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 24 Jan 2019 14:08:30 +0000 Subject: [PATCH 10/64] Mute SimpleClusterStateIT testMetadataVersion Due to https://github.com/elastic/elasticsearch/issues/37820 --- .../java/org/elasticsearch/cluster/SimpleClusterStateIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 8025d588820f9..45c4d5d3927f7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -119,6 +119,7 @@ public void testMetadata() throws Exception { assertThat(clusterStateResponse.getState().metaData().indices().size(), is(0)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37820") public void testMetadataVersion() { createIndex("index-1"); createIndex("index-2"); From 187b233571073e27f4ba04d0b05595cc1e7b53ce Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 24 Jan 2019 11:57:37 +0000 Subject: [PATCH 11/64] Read m_m_n from cluster states from 6.7 This completes the BWC serialisation changes required for a 6.7 master to inform other nodes of the node-level value of the `minimum_master_nodes` setting. 
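The gates on the read and write paths must match exactly, otherwise the two sides disagree on the wire format; the diff below moves both from `V_7_0_0` to `V_6_7_0`, with readers on older wires falling back to the -1 sentinel. The pattern, condensed (the helper names here are made up for illustration):

    import java.io.IOException;
    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    class GatedField {
        // Write the field only to peers new enough to read it...
        static void writeGated(StreamOutput out, int minimumMasterNodes) throws IOException {
            if (out.getVersion().onOrAfter(Version.V_6_7_0)) {
                out.writeVInt(minimumMasterNodes);
            }
        }

        // ...and read it only from peers that wrote it, defaulting to -1 otherwise.
        static int readGated(StreamInput in) throws IOException {
            return in.getVersion().onOrAfter(Version.V_6_7_0) ? in.readVInt() : -1;
        }
    }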
Relates #37701, #37811 --- .../main/java/org/elasticsearch/cluster/ClusterState.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 200f5b59d5416..7660ca9da92fa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -804,7 +804,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr Custom customIndexMetaData = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetaData.getWriteableName(), customIndexMetaData); } - builder.minimumMasterNodesOnPublishingMaster = in.getVersion().onOrAfter(Version.V_7_0_0) ? in.readVInt() : -1; + builder.minimumMasterNodesOnPublishingMaster = in.getVersion().onOrAfter(Version.V_6_7_0) ? in.readVInt() : -1; return builder.build(); } @@ -830,7 +830,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(cursor.value); } } - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeVInt(minimumMasterNodesOnPublishingMaster); } } @@ -880,7 +880,7 @@ private static class ClusterStateDiff implements Diff { metaData = MetaData.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - minimumMasterNodesOnPublishingMaster = in.getVersion().onOrAfter(Version.V_7_0_0) ? in.readVInt() : -1; + minimumMasterNodesOnPublishingMaster = in.getVersion().onOrAfter(Version.V_6_7_0) ? in.readVInt() : -1; } @Override @@ -894,7 +894,7 @@ public void writeTo(StreamOutput out) throws IOException { metaData.writeTo(out); blocks.writeTo(out); customs.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeVInt(minimumMasterNodesOnPublishingMaster); } } From 76fb57356956bd8d9d1e8d73241b8abb3048944d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Jan 2019 12:13:00 -0500 Subject: [PATCH 12/64] Do not allow put mapping on follower (#37675) Today, the mapping on the follower is managed and replicated from its leader index by the ShardFollowTask. Thus, we should prevent users from modifying the mapping on the follower indices. 
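The enforcement mechanism is a new `MappingRequestValidator` extension point that plugins can contribute through `ActionPlugin#mappingRequestValidators()`; CCR registers one that rejects any put-mapping request targeting a follower index unless the request's origin is "ccr", i.e. the mapping update came from the shard-follow task itself. A sketch condensed from the validator added in `CcrRequests` below, with a shortened error message; the settings key is the one behind `CcrSettings.CCR_FOLLOWING_INDEX_SETTING`:

    import org.elasticsearch.ElasticsearchStatusException;
    import org.elasticsearch.action.admin.indices.mapping.put.MappingRequestValidator;
    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.index.Index;
    import org.elasticsearch.rest.RestStatus;

    class FollowerMappingGuard {
        // Returning null allows the request; a non-null exception aborts it.
        static final MappingRequestValidator VALIDATOR = (request, state, indices) -> {
            for (Index index : indices) {
                IndexMetaData imd = state.metaData().index(index);
                boolean follower = imd != null
                    && imd.getSettings().getAsBoolean("index.xpack.ccr.following_index", false);
                if (follower && "ccr".equals(request.origin()) == false) {
                    return new ElasticsearchStatusException(
                        "cannot put mapping on follower index [" + index.getName() + "]",
                        RestStatus.FORBIDDEN);
                }
            }
            return null;
        };
    }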
Relates #30086 --- .../elasticsearch/action/ActionModule.java | 6 ++ .../mapping/put/MappingRequestValidator.java | 40 ++++++++ .../mapping/put/PutMappingRequest.java | 19 ++++ .../put/TransportPutMappingAction.java | 34 ++++++- .../elasticsearch/plugins/ActionPlugin.java | 10 ++ .../put/ValidateMappingRequestPluginIT.java | 99 +++++++++++++++++++ .../java/org/elasticsearch/xpack/ccr/Ccr.java | 7 ++ .../xpack/ccr/action/CcrRequests.java | 29 ++++++ .../xpack/ccr/IndexFollowingIT.java | 21 ++++ .../core/LocalStateCompositeXPackPlugin.java | 7 ++ 10 files changed, 271 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/MappingRequestValidator.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 142aa6bde74b6..8a8cea82b0a4d 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -357,6 +357,7 @@ public class ActionModule extends AbstractModule { private final AutoCreateIndex autoCreateIndex; private final DestructiveOperations destructiveOperations; private final RestController restController; + private final TransportPutMappingAction.RequestValidators mappingRequestValidators; public ActionModule(boolean transportClient, Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndexScopedSettings indexScopedSettings, ClusterSettings clusterSettings, SettingsFilter settingsFilter, @@ -388,6 +389,10 @@ public ActionModule(boolean transportClient, Settings settings, IndexNameExpress restWrapper = newRestWrapper; } } + mappingRequestValidators = new TransportPutMappingAction.RequestValidators( + actionPlugins.stream().flatMap(p -> p.mappingRequestValidators().stream()).collect(Collectors.toList()) + ); + if (transportClient) { restController = null; } else { @@ -678,6 +683,7 @@ public void initRestHandlers(Supplier nodesInCluster) { protected void configure() { bind(ActionFilters.class).toInstance(actionFilters); bind(DestructiveOperations.class).toInstance(destructiveOperations); + bind(TransportPutMappingAction.RequestValidators.class).toInstance(mappingRequestValidators); if (false == transportClient) { // Supporting classes only used when not a transport client diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/MappingRequestValidator.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/MappingRequestValidator.java new file mode 100644 index 0000000000000..8d6608c575874 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/MappingRequestValidator.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.mapping.put; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.index.Index; + +/** + * A validator that validates a {@link PutMappingRequest} before executing it. + * @see TransportPutMappingAction.RequestValidators + */ +public interface MappingRequestValidator { + + /** + * Validates a given put mapping request with its associated concrete indices and the current state. + * + * @param request the request to validate + * @param state the current cluster state + * @param indices the concrete indices that associated with the given put mapping request + * @return a non-null exception indicates a reason that the given request should be aborted; otherwise returns null. + */ + Exception validateRequest(PutMappingRequest request, ClusterState state, Index[] indices); +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 6868708145359..9b903a81e0327 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -74,6 +74,7 @@ public class PutMappingRequest extends AcknowledgedRequest im private String type; private String source; + private String origin = ""; private Index concreteIndex; @@ -184,6 +185,16 @@ public PutMappingRequest source(Object... source) { return source(buildFromSimplifiedDef(type, source)); } + public String origin() { + return origin; + } + + public PutMappingRequest origin(String origin) { + // reserve "null" for bwc. + this.origin = Objects.requireNonNull(origin); + return this; + } + /** * @param type * the mapping type @@ -301,6 +312,11 @@ public void readFrom(StreamInput in) throws IOException { in.readBoolean(); // updateAllTypes } concreteIndex = in.readOptionalWriteable(Index::new); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + origin = in.readOptionalString(); + } else { + origin = null; + } } @Override @@ -314,6 +330,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); // updateAllTypes } out.writeOptionalWriteable(concreteIndex); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeOptionalString(origin); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 565fd0616d028..acd0d10281463 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -37,20 +37,25 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collection; + /** * Put mapping action. 
*/ public class TransportPutMappingAction extends TransportMasterNodeAction { private final MetaDataMappingService metaDataMappingService; + private final RequestValidators requestValidators; @Inject public TransportPutMappingAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataMappingService metaDataMappingService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + RequestValidators requestValidators) { super(PutMappingAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutMappingRequest::new); this.metaDataMappingService = metaDataMappingService; + this.requestValidators = requestValidators; } @Override @@ -82,6 +87,11 @@ protected void masterOperation(final PutMappingRequest request, final ClusterSta final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()}; + final Exception validationException = requestValidators.validateRequest(request, state, concreteIndices); + if (validationException != null) { + listener.onFailure(validationException); + return; + } PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) @@ -107,4 +117,26 @@ public void onFailure(Exception t) { throw ex; } } + + + public static class RequestValidators { + private final Collection validators; + + public RequestValidators(Collection validators) { + this.validators = validators; + } + + private Exception validateRequest(PutMappingRequest request, ClusterState state, Index[] indices) { + Exception firstException = null; + for (MappingRequestValidator validator : validators) { + final Exception e = validator.validateRequest(request, state, indices); + if (firstException == null) { + firstException = e; + } else { + firstException.addSuppressed(e); + } + } + return firstException; + } + } } diff --git a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java index c0d94c3f000c8..adc2fa8f0b282 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.mapping.put.MappingRequestValidator; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; @@ -179,4 +181,12 @@ public int hashCode() { return Objects.hash(action, transportAction, supportTransportActions); } } + + /** + * Returns a collection of validators that are used by {@link TransportPutMappingAction.RequestValidators} to + * validate a {@link org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest} before the executing it. 
+ */ + default Collection mappingRequestValidators() { + return Collections.emptyList(); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java new file mode 100644 index 0000000000000..b25c9ecb5fc91 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.mapping.put; + +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class ValidateMappingRequestPluginIT extends ESSingleNodeTestCase { + static final Map> allowedOrigins = ConcurrentCollections.newConcurrentMap(); + public static class TestPlugin extends Plugin implements ActionPlugin { + @Override + public Collection mappingRequestValidators() { + return Collections.singletonList((request, state, indices) -> { + for (Index index : indices) { + if (allowedOrigins.getOrDefault(index.getName(), Collections.emptySet()).contains(request.origin()) == false) { + return new IllegalStateException("not allowed: index[" + index.getName() + "] origin[" + request.origin() + "]"); + } + } + return null; + }); + } + } + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(TestPlugin.class); + } + + public void testValidateMappingRequest() { + createIndex("index_1"); + createIndex("index_2"); + allowedOrigins.put("index_1", Arrays.asList("1", "2")); + allowedOrigins.put("index_2", Arrays.asList("2", "3")); + { + String origin = randomFrom("", "3", "4", "5"); + PutMappingRequest request = new PutMappingRequest().indices("index_1").type("doc").source("t1", "type=keyword").origin(origin); + Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().putMapping(request).actionGet()); + assertThat(e.getMessage(), equalTo("not allowed: index[index_1] origin[" + origin + "]")); + } + { + PutMappingRequest request = new PutMappingRequest().indices("index_1").origin(randomFrom("1", "2")) + .type("doc").source("t1", "type=keyword"); + 
assertAcked(client().admin().indices().putMapping(request).actionGet()); + } + + { + String origin = randomFrom("", "1", "4", "5"); + PutMappingRequest request = new PutMappingRequest().indices("index_2").type("doc").source("t2", "type=keyword").origin(origin); + Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().putMapping(request).actionGet()); + assertThat(e.getMessage(), equalTo("not allowed: index[index_2] origin[" + origin + "]")); + } + { + PutMappingRequest request = new PutMappingRequest().indices("index_2").origin(randomFrom("2", "3")) + .type("doc").source("t1", "type=keyword"); + assertAcked(client().admin().indices().putMapping(request).actionGet()); + } + + { + String origin = randomFrom("", "1", "3", "4"); + PutMappingRequest request = new PutMappingRequest().indices("*").type("doc").source("t3", "type=keyword").origin(origin); + Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().putMapping(request).actionGet()); + assertThat(e.getMessage(), containsString("not allowed:")); + } + { + PutMappingRequest request = new PutMappingRequest().indices("index_2").origin("2") + .type("doc").source("t3", "type=keyword"); + assertAcked(client().admin().indices().putMapping(request).actionGet()); + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 8ab9e396b4dc1..acda8d06dc550 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.mapping.put.MappingRequestValidator; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -45,6 +46,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; +import org.elasticsearch.xpack.ccr.action.CcrRequests; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; @@ -312,4 +314,9 @@ public void onIndexModule(IndexModule indexModule) { protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + @Override + public Collection mappingRequestValidators() { + return Collections.singletonList(CcrRequests.CCR_PUT_MAPPING_REQUEST_VALIDATOR); + } + } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java index 12432c740a701..87d913c337642 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java @@ -5,10 +5,20 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import 
org.elasticsearch.action.admin.indices.mapping.put.MappingRequestValidator; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; public final class CcrRequests { @@ -24,8 +34,27 @@ public static ClusterStateRequest metaDataRequest(String leaderIndex) { public static PutMappingRequest putMappingRequest(String followerIndex, MappingMetaData mappingMetaData) { PutMappingRequest putMappingRequest = new PutMappingRequest(followerIndex); + putMappingRequest.origin("ccr"); putMappingRequest.type(mappingMetaData.type()); putMappingRequest.source(mappingMetaData.source().string(), XContentType.JSON); return putMappingRequest; } + + public static final MappingRequestValidator CCR_PUT_MAPPING_REQUEST_VALIDATOR = (request, state, indices) -> { + if (request.origin() == null) { + return null; // a put-mapping-request on old versions does not have origin. + } + final List followingIndices = Arrays.stream(indices) + .filter(index -> { + final IndexMetaData indexMetaData = state.metaData().index(index); + return indexMetaData != null && CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(indexMetaData.getSettings()); + }).collect(Collectors.toList()); + if (followingIndices.isEmpty() == false && "ccr".equals(request.origin()) == false) { + final String errorMessage = "can't put mapping to the following indices " + + "[" + followingIndices.stream().map(Index::getName).collect(Collectors.joining(", ")) + "]; " + + "the mapping of the following indices are self-replicated from its leader indices"; + return new ElasticsearchStatusException(errorMessage, RestStatus.FORBIDDEN); + } + return null; + }; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 857445ad88de8..e811480e1b1a0 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ccr; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -46,6 +47,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; @@ -209,6 +211,25 @@ public void testNoMappingDefined() throws Exception { assertThat(XContentMapValues.extractValue("properties.k", mappingMetaData.sourceAsMap()), nullValue()); } + public void testDoNotAllowPutMappingToFollower() throws Exception { + final String leaderIndexSettings = getIndexSettings(between(1, 2), between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + 
assertAcked(leaderClient().admin().indices().prepareCreate("index-1").setSource(leaderIndexSettings, XContentType.JSON)); + followerClient().execute(PutFollowAction.INSTANCE, putFollow("index-1", "index-2")).get(); + PutMappingRequest putMappingRequest = new PutMappingRequest("index-2").type("doc").source("new_field", "type=keyword"); + ElasticsearchStatusException forbiddenException = expectThrows(ElasticsearchStatusException.class, + () -> followerClient().admin().indices().putMapping(putMappingRequest).actionGet()); + assertThat(forbiddenException.getMessage(), + equalTo("can't put mapping to the following indices [index-2]; " + + "the mapping of the following indices are self-replicated from its leader indices")); + assertThat(forbiddenException.status(), equalTo(RestStatus.FORBIDDEN)); + pauseFollow("index-2"); + followerClient().admin().indices().close(new CloseIndexRequest("index-2")).actionGet(); + assertAcked(followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request("index-2")).actionGet()); + followerClient().admin().indices().open(new OpenIndexRequest("index-2")).actionGet(); + assertAcked(followerClient().admin().indices().putMapping(putMappingRequest).actionGet()); + } + public void testFollowIndex_backlog() throws Exception { int numberOfShards = between(1, 5); String leaderIndexSettings = getIndexSettings(numberOfShards, between(0, 1), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 2b19eea5b567f..1dd07a5df81ff 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -7,6 +7,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.mapping.put.MappingRequestValidator; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.Client; @@ -427,6 +428,12 @@ public Optional getEngineFactory(IndexSettings indexSettings) { } } + @Override + public Collection mappingRequestValidators() { + return filterPlugins(ActionPlugin.class).stream().flatMap(p -> p.mappingRequestValidators().stream()) + .collect(Collectors.toList()); + } + private List filterPlugins(Class type) { return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T)p)) .collect(Collectors.toList()); From 33cac52b2c1919048e5e0772f1cb8911e88a456c Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 24 Jan 2019 09:14:08 -0800 Subject: [PATCH 13/64] [DOCS] Remove beta tag from metricbeat monitoring (#37791) --- docs/reference/monitoring/configuring-metricbeat.asciidoc | 2 -- docs/reference/monitoring/index.asciidoc | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index dd7811b34219e..a161559d3f103 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -6,8 +6,6 @@ Collecting monitoring data with {metricbeat} ++++ -beta[] - In 6.5 and later, you can use {metricbeat} to collect data about {es} and ship it to the monitoring cluster, rather than routing it through exporters as 
described in <>. diff --git a/docs/reference/monitoring/index.asciidoc b/docs/reference/monitoring/index.asciidoc index 13e7314f8af5f..fbda72e0f979a 100644 --- a/docs/reference/monitoring/index.asciidoc +++ b/docs/reference/monitoring/index.asciidoc @@ -33,7 +33,7 @@ indexing (storage). The routing and indexing processes in {es} are handled by what are called <> and <>. -beta[] Alternatively, in 6.4 and later, you can use {metricbeat} to collect +Alternatively, in 6.4 and later, you can use {metricbeat} to collect monitoring data about {kib} and ship it directly to the monitoring cluster, rather than routing it through the production cluster. In 6.5 and later, you can also use {metricbeat} to collect and ship data about {es}. From 289106a578284aa40f1026c09dd8fa4ff4ea23ee Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 24 Jan 2019 10:12:14 -0800 Subject: [PATCH 14/64] Refactor GeoHashGrid to be abstract and re-usable (#37742) This change splits out all the specific GeoHash classes for the geohash_grid aggregation into abstract GeoGrid classes that can be re-used for specific hashing types, like `geohash`. --- .../client/RestHighLevelClient.java | 4 +- .../elasticsearch/search/SearchModule.java | 6 +- .../aggregations/AggregationBuilders.java | 10 +- .../bucket/geogrid/BucketPriorityQueue.java | 40 ++++ .../bucket/geogrid/CellIdSource.java | 23 ++- .../{GeoHashGrid.java => GeoGrid.java} | 9 +- .../geogrid/GeoGridAggregationBuilder.java | 74 +++---- .../bucket/geogrid/GeoGridAggregator.java | 192 ++++++++++++++++++ .../GeoHashGridAggregationBuilder.java | 74 +++++++ .../bucket/geogrid/GeoHashGridAggregator.java | 110 +--------- .../geogrid/GeoHashGridAggregatorFactory.java | 5 +- .../bucket/geogrid/InternalGeoGrid.java | 147 ++++++++++++++ ...Bucket.java => InternalGeoGridBucket.java} | 30 ++- .../bucket/geogrid/InternalGeoHashGrid.java | 134 ++---------- .../geogrid/InternalGeoHashGridBucket.java | 55 +++++ .../bucket/geogrid/ParsedGeoGrid.java | 49 +++++ ...idParams.java => ParsedGeoGridBucket.java} | 21 +- .../bucket/geogrid/ParsedGeoHashGrid.java | 52 +---- .../geogrid/ParsedGeoHashGridBucket.java | 41 ++++ .../support/AggregationInspectionHelper.java | 4 +- .../aggregations/AggregationsTests.java | 4 +- .../aggregations/bucket/GeoHashGridIT.java | 26 +-- .../aggregations/bucket/GeoHashGridTests.java | 5 +- .../aggregations/bucket/ShardReduceIT.java | 4 +- .../geogrid/GeoHashGridAggregatorTests.java | 4 +- .../geogrid/GeoHashGridParserTests.java | 14 +- ...shGridTests.java => GeoHashGridTests.java} | 39 ++-- .../aggregations/metrics/GeoCentroidIT.java | 8 +- .../test/InternalAggregationTestCase.java | 4 +- 29 files changed, 778 insertions(+), 410 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/{GeoGridBucket.java => InternalGeoGridBucket.java} (80%) create mode 100644
server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java rename server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/{GeoHashGridParams.java => ParsedGeoGridBucket.java} (59%) create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java rename server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/{InternalGeoHashGridTests.java => GeoHashGridTests.java} (75%) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index e82c2dc620494..5ef0e0110c12d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -93,7 +93,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter; import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; @@ -1758,7 +1758,7 @@ static List getDefaultNamedXContents() { map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); + map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c)); map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c)); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index e75271c2885ae..3d93effecc545 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -108,7 +108,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; @@ -420,8 
+420,8 @@ private void registerAggregations(List plugins) { AutoDateHistogramAggregationBuilder::parse).addResultReader(InternalAutoDateHistogram::new)); registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder.NAME, GeoDistanceAggregationBuilder::new, GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new)); - registerAggregation(new AggregationSpec(GeoGridAggregationBuilder.NAME, GeoGridAggregationBuilder::new, - GeoGridAggregationBuilder::parse).addResultReader(InternalGeoHashGrid::new)); + registerAggregation(new AggregationSpec(GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, + GeoHashGridAggregationBuilder::parse).addResultReader(InternalGeoHashGrid::new)); registerAggregation(new AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new, NestedAggregationBuilder::parse).addResultReader(InternalNested::new)); registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder.NAME, ReverseNestedAggregationBuilder::new, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java index 6d8c8a94f3e6f..fd56172325230 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java @@ -28,8 +28,8 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filters; import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -244,10 +244,10 @@ public static HistogramAggregationBuilder histogram(String name) { } /** - * Create a new {@link GeoHashGrid} aggregation with the given name. + * Create a new {@link InternalGeoHashGrid} aggregation with the given name. */ - public static GeoGridAggregationBuilder geohashGrid(String name) { - return new GeoGridAggregationBuilder(name); + public static GeoHashGridAggregationBuilder geohashGrid(String name) { + return new GeoHashGridAggregationBuilder(name); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java new file mode 100644 index 0000000000000..9a2cd7ab880f0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.apache.lucene.util.PriorityQueue; + +class BucketPriorityQueue extends PriorityQueue { + + BucketPriorityQueue(int size) { + super(size); + } + + @Override + protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { + int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); + if (cmp == 0) { + cmp = o2.compareTo(o1); + if (cmp == 0) { + cmp = System.identityHashCode(o2) - System.identityHashCode(o1); + } + } + return cmp > 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java index 268a27b4669db..fce0747b3dc60 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java @@ -20,7 +20,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -36,11 +35,13 @@ class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; private final int precision; + private final GeoPointLongEncoder encoder; - CellIdSource(GeoPoint valuesSource, int precision) { + CellIdSource(GeoPoint valuesSource, int precision, GeoPointLongEncoder encoder) { this.valuesSource = valuesSource; //different GeoPoints could map to the same or different geohash cells. this.precision = precision; + this.encoder = encoder; } public int precision() { @@ -54,7 +55,7 @@ public boolean isFloatingPoint() { @Override public SortedNumericDocValues longValues(LeafReaderContext ctx) { - return new CellValues(valuesSource.geoPointValues(ctx), precision); + return new CellValues(valuesSource.geoPointValues(ctx), precision, encoder); } @Override @@ -67,13 +68,24 @@ public SortedBinaryDocValues bytesValues(LeafReaderContext ctx) { throw new UnsupportedOperationException(); } + /** + * The encoder to use to convert a geopoint's (lon, lat, precision) into + * a long-encoded bucket key for aggregating. 
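The GeoPointLongEncoder hook whose javadoc ends above (its declaration follows just below) boils down to a (lon, lat, precision) -> long function, which is what lets CellIdSource stay hash-agnostic. A runnable toy with the same shape, to make that concrete; the interface name and the packing scheme here are invented for illustration and are not the real GeoHashUtils.longEncode:

    // Toy stand-in for the encoder hook; names and packing are illustrative only.
    @FunctionalInterface
    interface CellEncoder {
        long encode(double lon, double lat, int precision);
    }

    class CellEncoderDemo {
        public static void main(String[] args) {
            // Pack lon/lat into a coarse 2^precision x 2^precision integer grid.
            CellEncoder toy = (lon, lat, precision) -> {
                long cells = 1L << precision;
                long x = Math.min(cells - 1, (long) ((lon + 180.0) / 360.0 * cells));
                long y = Math.min(cells - 1, (long) ((lat + 90.0) / 180.0 * cells));
                return (x << 32) | y;
            };
            System.out.println(toy.encode(-73.98, 40.74, 10)); // cell id for a point near NYC
        }
    }

Any function with this signature could be handed to CellIdSource in place of GeoHashUtils::longEncode, which is exactly the seam this patch opens up.
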
+ */ + @FunctionalInterface + public interface GeoPointLongEncoder { + long encode(double lon, double lat, int precision); + } + private static class CellValues extends AbstractSortingNumericDocValues { private MultiGeoPointValues geoValues; private int precision; + private GeoPointLongEncoder encoder; - protected CellValues(MultiGeoPointValues geoValues, int precision) { + protected CellValues(MultiGeoPointValues geoValues, int precision, GeoPointLongEncoder encoder) { this.geoValues = geoValues; this.precision = precision; + this.encoder = encoder; } @Override @@ -82,8 +94,7 @@ public boolean advanceExact(int docId) throws IOException { resize(geoValues.docValueCount()); for (int i = 0; i < docValueCount(); ++i) { org.elasticsearch.common.geo.GeoPoint target = geoValues.nextValue(); - values[i] = GeoHashUtils.longEncode(target.getLon(), target.getLat(), - precision); + values[i] = encoder.encode(target.getLon(), target.getLat(), precision); } sort(); return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGrid.java similarity index 73% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGrid.java index 9cce698957d70..e320836eabf10 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGrid.java @@ -23,19 +23,20 @@ import java.util.List; /** - * A {@code geohash_grid} aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific + * A geo-grid aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific * precision. */ -public interface GeoHashGrid extends MultiBucketsAggregation { +public interface GeoGrid extends MultiBucketsAggregation { /** - * A bucket that is associated with a {@code geohash_grid} cell. The key of the bucket is the {@code geohash} of the cell + * A bucket that is associated with a geo-grid cell. 
The key of the bucket is + * the {@link InternalGeoGridBucket#getKeyAsString()} of the cell */ interface Bucket extends MultiBucketsAggregation.Bucket { } /** - * @return The buckets of this aggregation (each bucket representing a geohash grid cell) + * @return The buckets of this aggregation (each bucket representing a geo-grid cell) */ @Override List getBuckets(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 85e4c8b228e1a..c90c77b91be37 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -20,13 +20,12 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.BucketUtils; @@ -44,31 +43,31 @@ import java.util.Map; import java.util.Objects; -import static org.elasticsearch.common.geo.GeoUtils.parsePrecision; - -public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder +public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder { - public static final String NAME = "geohash_grid"; - public static final int DEFAULT_PRECISION = 5; - public static final int DEFAULT_MAX_NUM_CELLS = 10000; - - private static final ObjectParser PARSER; - static { - PARSER = new ObjectParser<>(GeoGridAggregationBuilder.NAME); - ValuesSourceParserHelper.declareGeoFields(PARSER, false, false); - PARSER.declareField((parser, builder, context) -> builder.precision(parsePrecision(parser)), GeoHashGridParams.FIELD_PRECISION, - org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); - PARSER.declareInt(GeoGridAggregationBuilder::size, GeoHashGridParams.FIELD_SIZE); - PARSER.declareInt(GeoGridAggregationBuilder::shardSize, GeoHashGridParams.FIELD_SHARD_SIZE); - } + /* recognized field names in JSON */ + static final ParseField FIELD_PRECISION = new ParseField("precision"); + static final ParseField FIELD_SIZE = new ParseField("size"); + static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size"); - public static GeoGridAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { - return PARSER.parse(parser, new GeoGridAggregationBuilder(aggregationName), null); + protected int precision; + protected int requiredSize; + protected int shardSize; + + @FunctionalInterface + protected interface PrecisionParser { + int parse(XContentParser parser) throws IOException; } - private int precision = DEFAULT_PRECISION; - private int requiredSize = DEFAULT_MAX_NUM_CELLS; - private int shardSize = -1; + public static ObjectParser createParser(String name, 
PrecisionParser precisionParser) { + ObjectParser parser = new ObjectParser<>(name); + ValuesSourceParserHelper.declareGeoFields(parser, false, false); + parser.declareField((p, builder, context) -> builder.precision(precisionParser.parse(p)), FIELD_PRECISION, + org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); + parser.declareInt(GeoGridAggregationBuilder::size, FIELD_SIZE); + parser.declareInt(GeoGridAggregationBuilder::shardSize, FIELD_SHARD_SIZE); + return parser; + } public GeoGridAggregationBuilder(String name) { super(name, ValuesSourceType.GEOPOINT, ValueType.GEOPOINT); @@ -79,11 +78,7 @@ protected GeoGridAggregationBuilder(GeoGridAggregationBuilder clone, Builder fac this.precision = clone.precision; this.requiredSize = clone.requiredSize; this.shardSize = clone.shardSize; - } - @Override - protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map metaData) { - return new GeoGridAggregationBuilder(this, factoriesBuilder, metaData); } /** @@ -103,10 +98,12 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(shardSize); } - public GeoGridAggregationBuilder precision(int precision) { - this.precision = GeoUtils.checkPrecisionRange(precision); - return this; - } + /** + * method to validate and set the precision value + * @param precision the precision to set for the aggregation + * @return the {@link GeoGridAggregationBuilder} builder + */ + public abstract GeoGridAggregationBuilder precision(int precision); public int precision() { return precision; @@ -154,7 +151,7 @@ public int shardSize() { if (requiredSize <= 0 || shardSize <= 0) { throw new ElasticsearchException( - "parameters [required_size] and [shard_size] must be >0 in geohash_grid aggregation [" + name + "]."); + "parameters [required_size] and [shard_size] must be > 0 in " + getType() + " aggregation [" + name + "]."); } if (shardSize < requiredSize) { @@ -166,10 +163,10 @@ public int shardSize() { @Override protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(GeoHashGridParams.FIELD_PRECISION.getPreferredName(), precision); - builder.field(GeoHashGridParams.FIELD_SIZE.getPreferredName(), requiredSize); + builder.field(FIELD_PRECISION.getPreferredName(), precision); + builder.field(FIELD_SIZE.getPreferredName(), requiredSize); if (shardSize > -1) { - builder.field(GeoHashGridParams.FIELD_SHARD_SIZE.getPreferredName(), shardSize); + builder.field(FIELD_SHARD_SIZE.getPreferredName(), shardSize); } return builder; } @@ -193,11 +190,4 @@ protected boolean innerEquals(Object obj) { protected int innerHashCode() { return Objects.hash(precision, requiredSize, shardSize); } - - @Override - public String getType() { - return NAME; - } - - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java new file mode 100644 index 0000000000000..a02bb4c6c1a55 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -0,0 +1,192 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ScoreMode; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Aggregates data expressed as GeoHash longs (for efficiency's sake) but formats results as Geohash strings. + */ +public abstract class GeoGridAggregator extends BucketsAggregator { + + protected final int requiredSize; + protected final int shardSize; + protected final CellIdSource valuesSource; + protected final LongHash bucketOrds; + + GeoGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, + int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); + this.valuesSource = valuesSource; + this.requiredSize = requiredSize; + this.shardSize = shardSize; + bucketOrds = new LongHash(1, aggregationContext.bigArrays()); + } + + @Override + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, + final LeafBucketCollector sub) throws IOException { + final SortedNumericDocValues values = valuesSource.longValues(ctx); + return new LeafBucketCollectorBase(sub, null) { + @Override + public void collect(int doc, long bucket) throws IOException { + assert bucket == 0; + if (values.advanceExact(doc)) { + final int valuesCount = values.docValueCount(); + + long previous = Long.MAX_VALUE; + for (int i = 0; i < valuesCount; ++i) { + final long val = values.nextValue(); + if (previous != val || i == 0) { + long bucketOrdinal = bucketOrds.add(val); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = -1 - bucketOrdinal; + collectExistingBucket(sub, doc, bucketOrdinal); + } else { + collectBucket(sub, doc, bucketOrdinal); + } + previous = val; + } + } + } + } + }; + } + + // private impl that stores a bucket ord. This allows for computing the aggregations lazily. 
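Two details of the leaf collector above are worth noting before the OrdinalBucket helper below: values arrive sorted per document, so comparing against the previous value suppresses duplicate cells within one document, and LongHash.add either assigns a fresh bucket ordinal or signals an existing one by returning -1 - ordinal. The same dedup-and-count loop as a self-contained sketch, with plain Java collections standing in for LongHash and the per-ordinal doc counters:

    import java.util.HashMap;
    import java.util.Map;

    class CellCountSketch {
        // long-encoded cell id -> doc count; emulates LongHash plus bucketDocCount
        private final Map<Long, Long> counts = new HashMap<>();

        // 'values' holds the sorted cell ids of one document, as in collect(doc, bucket)
        void collect(long[] values) {
            long previous = Long.MAX_VALUE;
            for (int i = 0; i < values.length; i++) {
                long val = values[i];
                if (previous != val || i == 0) { // count each distinct cell once per doc
                    counts.merge(val, 1L, Long::sum);
                    previous = val;
                }
            }
        }
    }
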
+ static class OrdinalBucket extends InternalGeoGridBucket { + + long bucketOrd; + InternalGeoGridBucket sourceBucket; // used to keep track of appropriate getKeyAsString method + + OrdinalBucket(InternalGeoGridBucket sourceBucket) { + super(sourceBucket.geohashAsLong, sourceBucket.docCount, sourceBucket.aggregations); + this.sourceBucket = sourceBucket; + } + + void geohashAsLong(long geohashAsLong) { + this.geohashAsLong = geohashAsLong; + this.sourceBucket.geohashAsLong = geohashAsLong; + } + + @Override + InternalGeoGridBucket buildBucket(InternalGeoGridBucket bucket, long geoHashAsLong, long docCount, + InternalAggregations aggregations) { + OrdinalBucket ordBucket = new OrdinalBucket(bucket); + ordBucket.geohashAsLong = geoHashAsLong; + ordBucket.docCount = docCount; + ordBucket.aggregations = aggregations; + // this is done because the aggregator may be rebuilt from cache (non OrdinalBucket), + // or it may be rebuilding from a new calculation, and therefore copying bucketOrd. + if (bucket instanceof OrdinalBucket) { + ordBucket.bucketOrd = ((OrdinalBucket) bucket).bucketOrd; + } + return ordBucket; + } + + @Override + public Object getKey() { + return sourceBucket.getKey(); + } + + @Override + public String getKeyAsString() { + return sourceBucket.getKeyAsString(); + } + } + + abstract T buildAggregation(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData); + + /** + * This method is used to return a re-usable instance of the bucket when building + * the aggregation. + * @return a new {@link InternalGeoGridBucket} implementation with empty parameters + */ + abstract InternalGeoGridBucket newEmptyBucket(); + + @Override + public InternalGeoGrid buildAggregation(long owningBucketOrdinal) throws IOException { + assert owningBucketOrdinal == 0; + final int size = (int) Math.min(bucketOrds.size(), shardSize); + consumeBucketsAndMaybeBreak(size); + + BucketPriorityQueue ordered = new BucketPriorityQueue(size); + OrdinalBucket spare = null; + for (long i = 0; i < bucketOrds.size(); i++) { + if (spare == null) { + spare = new OrdinalBucket(newEmptyBucket()); + } + + // need a special function to keep the source bucket + // up-to-date so it can get the appropriate key + spare.geohashAsLong(bucketOrds.get(i)); + spare.docCount = bucketDocCount(i); + spare.bucketOrd = i; + spare = (OrdinalBucket) ordered.insertWithOverflow(spare); + } + + final InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + for (int i = ordered.size() - 1; i >= 0; --i) { + final OrdinalBucket bucket = (OrdinalBucket) ordered.pop(); + bucket.aggregations = bucketAggregations(bucket.bucketOrd); + list[i] = bucket; + } + return buildAggregation(name, requiredSize, Arrays.asList(list), pipelineAggregators(), metaData()); + } + + @Override + public InternalGeoGrid buildEmptyAggregation() { + return buildAggregation(name, requiredSize, Collections.emptyList(), pipelineAggregators(), metaData()); + } + + + @Override + public void doClose() { + Releasables.close(bucketOrds); + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java new file mode 100644 index 0000000000000..416634011de0e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; + +import java.io.IOException; +import java.util.Map; + +public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { + public static final String NAME = "geohash_grid"; + public static final int DEFAULT_PRECISION = 5; + public static final int DEFAULT_MAX_NUM_CELLS = 10000; + + private static final ObjectParser PARSER = createParser(NAME, GeoUtils::parsePrecision); + + public GeoHashGridAggregationBuilder(String name) { + super(name); + precision(DEFAULT_PRECISION); + size(DEFAULT_MAX_NUM_CELLS); + shardSize = -1; + } + + public GeoHashGridAggregationBuilder(StreamInput in) throws IOException { + super(in); + } + + @Override + public GeoGridAggregationBuilder precision(int precision) { + this.precision = GeoUtils.checkPrecisionRange(precision); + return this; + } + + private GeoHashGridAggregationBuilder(GeoHashGridAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, + Map metaData) { + super(clone, factoriesBuilder, metaData); + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metaData) { + return new GeoHashGridAggregationBuilder(this, factoriesBuilder, metaData); + } + + public static GeoGridAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { + return PARSER.parse(parser, new GeoHashGridAggregationBuilder(aggregationName), null); + } + + @Override + public String getType() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 1ead747bb93e2..54d1e2e940649 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -18,21 +18,12 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.ScoreMode; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.aggregations.Aggregator; import 
org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -40,98 +31,18 @@ /** * Aggregates data expressed as GeoHash longs (for efficiency's sake) but formats results as Geohash strings. */ -public class GeoHashGridAggregator extends BucketsAggregator { - - private final int requiredSize; - private final int shardSize; - private final CellIdSource valuesSource; - private final LongHash bucketOrds; +public class GeoHashGridAggregator extends GeoGridAggregator { GeoHashGridAggregator(String name, AggregatorFactories factories, CellIdSource valuesSource, - int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); - this.valuesSource = valuesSource; - this.requiredSize = requiredSize; - this.shardSize = shardSize; - bucketOrds = new LongHash(1, aggregationContext.bigArrays()); + int requiredSize, int shardSize, SearchContext aggregationContext, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, valuesSource, requiredSize, shardSize, aggregationContext, parent, pipelineAggregators, metaData); } @Override - public ScoreMode scoreMode() { - if (valuesSource != null && valuesSource.needsScores()) { - return ScoreMode.COMPLETE; - } - return super.scoreMode(); - } - - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, - final LeafBucketCollector sub) throws IOException { - final SortedNumericDocValues values = valuesSource.longValues(ctx); - return new LeafBucketCollectorBase(sub, null) { - @Override - public void collect(int doc, long bucket) throws IOException { - assert bucket == 0; - if (values.advanceExact(doc)) { - final int valuesCount = values.docValueCount(); - - long previous = Long.MAX_VALUE; - for (int i = 0; i < valuesCount; ++i) { - final long val = values.nextValue(); - if (previous != val || i == 0) { - long bucketOrdinal = bucketOrds.add(val); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = -1 - bucketOrdinal; - collectExistingBucket(sub, doc, bucketOrdinal); - } else { - collectBucket(sub, doc, bucketOrdinal); - } - previous = val; - } - } - } - } - }; - } - - // private impl that stores a bucket ord. This allows for computing the aggregations lazily. 
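The removals in this hunk show how little is left in GeoHashGridAggregator after the refactor: collection, ordering, and ordinal bookkeeping all moved into GeoGridAggregator, and the subclass only fills in the buildAggregation and newEmptyBucket hooks. A standalone caricature of that template-method split, with all names invented rather than taken from this patch:

    import java.util.List;

    // Base class owns the shared pipeline; subclasses pick the result and bucket types.
    abstract class GridAggregatorSketch<T> {
        abstract T buildResult(List<Long> cells); // hook, like buildAggregation(name, size, buckets, ...)
        abstract long emptyCellKey();             // hook, like newEmptyBucket()

        T run(List<Long> collectedCells) {        // shared path fixed in the base class
            collectedCells.sort(Long::compare);
            return buildResult(collectedCells);
        }
    }

    class HashGridSketch extends GridAggregatorSketch<String> {
        @Override
        String buildResult(List<Long> cells) {
            return "geohash_grid" + cells;
        }

        @Override
        long emptyCellKey() {
            return 0L; // mirrors new InternalGeoHashGridBucket(0, 0, null) below
        }
    }

HashGridSketch plays the role GeoHashGridAggregator takes on here: it no longer owns collection or ordering, only the concrete result type.
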
- static class OrdinalBucket extends GeoGridBucket { - - long bucketOrd; - - OrdinalBucket() { - super(0, 0, null); - } - - } - - @Override - public InternalGeoHashGrid buildAggregation(long owningBucketOrdinal) throws IOException { - assert owningBucketOrdinal == 0; - final int size = (int) Math.min(bucketOrds.size(), shardSize); - consumeBucketsAndMaybeBreak(size); - - InternalGeoHashGrid.BucketPriorityQueue ordered = new InternalGeoHashGrid.BucketPriorityQueue(size); - OrdinalBucket spare = null; - for (long i = 0; i < bucketOrds.size(); i++) { - if (spare == null) { - spare = new OrdinalBucket(); - } - - spare.geohashAsLong = bucketOrds.get(i); - spare.docCount = bucketDocCount(i); - spare.bucketOrd = i; - spare = (OrdinalBucket) ordered.insertWithOverflow(spare); - } - - final GeoGridBucket[] list = new GeoGridBucket[ordered.size()]; - for (int i = ordered.size() - 1; i >= 0; --i) { - final OrdinalBucket bucket = (OrdinalBucket) ordered.pop(); - bucket.aggregations = bucketAggregations(bucket.bucketOrd); - list[i] = bucket; - } - return new InternalGeoHashGrid(name, requiredSize, Arrays.asList(list), pipelineAggregators(), metaData()); + InternalGeoHashGrid buildAggregation(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData) { + return new InternalGeoHashGrid(name, requiredSize, buckets, pipelineAggregators, metaData); } @Override @@ -139,10 +50,7 @@ public InternalGeoHashGrid buildEmptyAggregation() { return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), pipelineAggregators(), metaData()); } - - @Override - public void doClose() { - Releasables.close(bucketOrds); + InternalGeoGridBucket newEmptyBucket() { + return new InternalGeoHashGridBucket(0, 0, null); } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index b7cb50b5f44c0..e4b99b0c9656e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; +import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -70,10 +71,8 @@ protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } - CellIdSource cellIdSource = new CellIdSource(valuesSource, precision); + CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, GeoHashUtils::longEncode); return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, context, parent, pipelineAggregators, metaData); - } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java new file mode 100644 index 0000000000000..9608ac914c0aa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.LongObjectPagedHashMap; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.unmodifiableList; + +/** + * Represents a grid of cells where each cell's location is determined by a specific geo hashing algorithm. + * All geo-grid hash-encoding in a grid are of the same precision and held internally as a single long + * for efficiency's sake. + */ +public abstract class InternalGeoGrid + extends InternalMultiBucketAggregation implements GeoGrid { + + protected final int requiredSize; + protected final List buckets; + + InternalGeoGrid(String name, int requiredSize, List buckets, List pipelineAggregators, + Map metaData) { + super(name, pipelineAggregators, metaData); + this.requiredSize = requiredSize; + this.buckets = buckets; + } + + abstract Writeable.Reader getBucketReader(); + + /** + * Read from a stream. + */ + public InternalGeoGrid(StreamInput in) throws IOException { + super(in); + requiredSize = readSize(in); + buckets = (List) in.readList(getBucketReader()); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + writeSize(requiredSize, out); + out.writeList(buckets); + } + + abstract InternalGeoGrid create(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData); + + @Override + public List getBuckets() { + return unmodifiableList(buckets); + } + + @Override + public InternalGeoGrid doReduce(List aggregations, ReduceContext reduceContext) { + LongObjectPagedHashMap> buckets = null; + for (InternalAggregation aggregation : aggregations) { + InternalGeoGrid grid = (InternalGeoGrid) aggregation; + if (buckets == null) { + buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); + } + for (Object obj : grid.buckets) { + B bucket = (B) obj; + List existingBuckets = buckets.get(bucket.geohashAsLong()); + if (existingBuckets == null) { + existingBuckets = new ArrayList<>(aggregations.size()); + buckets.put(bucket.geohashAsLong(), existingBuckets); + } + existingBuckets.add(bucket); + } + } + + final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; + InternalGeoGridBucket removed = ordered.insertWithOverflow(sameCellBuckets.get(0).reduce(sameCellBuckets, reduceContext)); + if (removed != null) { + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed)); + } else { + reduceContext.consumeBucketsAndMaybeBreak(1); + } + } + buckets.close(); + InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + for (int i = ordered.size() - 1; i >= 0; i--) { + list[i] = ordered.pop(); + } + return create(getName(), requiredSize, Arrays.asList(list), pipelineAggregators(), getMetaData()); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.startArray(CommonFields.BUCKETS.getPreferredName()); + for (InternalGeoGridBucket bucket : buckets) { + bucket.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + // package protected for testing + int getRequiredSize() { + return requiredSize; + } + + @Override + protected int doHashCode() { + return Objects.hash(requiredSize, buckets); + } + + @Override + protected boolean doEquals(Object obj) { + InternalGeoGrid other = (InternalGeoGrid) obj; + return Objects.equals(requiredSize, other.requiredSize) && + Objects.equals(buckets, other.buckets); + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java similarity index 80% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index 850bc6e7bafcb..2184ed76e5071 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.elasticsearch.common.geo.GeoHashUtils; -import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -34,13 +32,14 @@ import java.util.List; import java.util.Objects; -public class GeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoHashGrid.Bucket, Comparable { +public abstract class InternalGeoGridBucket + extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, Comparable { protected long geohashAsLong; protected long docCount; protected InternalAggregations aggregations; - GeoGridBucket(long geohashAsLong, long docCount, InternalAggregations aggregations) { + public InternalGeoGridBucket(long geohashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; this.geohashAsLong = geohashAsLong; @@ -49,7 +48,7 @@ public class GeoGridBucket extends InternalMultiBucketAggregation.InternalBucket /** * Read from a stream. 
*/ - GeoGridBucket(StreamInput in) throws IOException { + public InternalGeoGridBucket(StreamInput in) throws IOException { geohashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readAggregations(in); @@ -62,14 +61,11 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - @Override - public String getKeyAsString() { - return GeoHashUtils.stringEncode(geohashAsLong); - } + abstract B buildBucket(InternalGeoGridBucket bucket, long geoHashAsLong, long docCount, InternalAggregations aggregations); - @Override - public GeoPoint getKey() { - return GeoPoint.fromGeohash(geohashAsLong); + + long geohashAsLong() { + return geohashAsLong; } @Override @@ -83,7 +79,7 @@ public Aggregations getAggregations() { } @Override - public int compareTo(GeoGridBucket other) { + public int compareTo(InternalGeoGridBucket other) { if (this.geohashAsLong > other.geohashAsLong) { return 1; } @@ -93,15 +89,15 @@ public int compareTo(GeoGridBucket other) { return 0; } - public GeoGridBucket reduce(List buckets, InternalAggregation.ReduceContext context) { + public B reduce(List buckets, InternalAggregation.ReduceContext context) { List aggregationsList = new ArrayList<>(buckets.size()); long docCount = 0; - for (GeoGridBucket bucket : buckets) { + for (B bucket : buckets) { docCount += bucket.docCount; aggregationsList.add(bucket.aggregations); } final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return new GeoGridBucket(geohashAsLong, docCount, aggs); + return buildBucket(this, geohashAsLong, docCount, aggs); } @Override @@ -118,7 +114,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - GeoGridBucket bucket = (GeoGridBucket) o; + InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; return geohashAsLong == bucket.geohashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 6f887e644b349..57eacdcb4ec7f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -18,158 +18,54 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.LongObjectPagedHashMap; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.Objects; - -import static java.util.Collections.unmodifiableList; /** * Represents a grid of cells where each cell's location is determined by a geohash. 
* All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. */ -public class InternalGeoHashGrid extends InternalMultiBucketAggregation implements - GeoHashGrid { +public class InternalGeoHashGrid extends InternalGeoGrid { - private final int requiredSize; - private final List buckets; + private static final String NAME = "geohash_grid"; - InternalGeoHashGrid(String name, int requiredSize, List buckets, List pipelineAggregators, - Map metaData) { - super(name, pipelineAggregators, metaData); - this.requiredSize = requiredSize; - this.buckets = buckets; + InternalGeoHashGrid(String name, int requiredSize, List buckets, + List pipelineAggregators, Map metaData) { + super(name, requiredSize, buckets, pipelineAggregators, metaData); } - /** - * Read from a stream. - */ public InternalGeoHashGrid(StreamInput in) throws IOException { super(in); - requiredSize = readSize(in); - buckets = in.readList(GeoGridBucket::new); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - writeSize(requiredSize, out); - out.writeList(buckets); - } - - @Override - public String getWriteableName() { - return GeoGridAggregationBuilder.NAME; - } - - @Override - public InternalGeoHashGrid create(List buckets) { - return new InternalGeoHashGrid(this.name, this.requiredSize, buckets, this.pipelineAggregators(), this.metaData); } @Override - public GeoGridBucket createBucket(InternalAggregations aggregations, GeoGridBucket prototype) { - return new GeoGridBucket(prototype.geohashAsLong, prototype.docCount, aggregations); + public InternalGeoGrid create(List buckets) { + return new InternalGeoHashGrid(name, requiredSize, buckets, pipelineAggregators(), metaData); } @Override - public List getBuckets() { - return unmodifiableList(buckets); + public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + return new InternalGeoHashGridBucket(prototype.geohashAsLong, prototype.docCount, aggregations); } @Override - public InternalGeoHashGrid doReduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; - for (InternalAggregation aggregation : aggregations) { - InternalGeoHashGrid grid = (InternalGeoHashGrid) aggregation; - if (buckets == null) { - buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); - } - for (GeoGridBucket bucket : grid.buckets) { - List existingBuckets = buckets.get(bucket.geohashAsLong); - if (existingBuckets == null) { - existingBuckets = new ArrayList<>(aggregations.size()); - buckets.put(bucket.geohashAsLong, existingBuckets); - } - existingBuckets.add(bucket); - } - } - - final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); - BucketPriorityQueue ordered = new BucketPriorityQueue(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; - GeoGridBucket removed = ordered.insertWithOverflow(sameCellBuckets.get(0).reduce(sameCellBuckets, reduceContext)); - if (removed != null) { - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed)); - } else { - reduceContext.consumeBucketsAndMaybeBreak(1); - } - } - buckets.close(); - GeoGridBucket[] list = new GeoGridBucket[ordered.size()]; - for (int i = ordered.size() - 1; i >= 0; i--) { - list[i] = ordered.pop(); - } - return new InternalGeoHashGrid(getName(), requiredSize, Arrays.asList(list), pipelineAggregators(), getMetaData()); - } - - @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (GeoGridBucket bucket : buckets) { - bucket.toXContent(builder, params); - } - builder.endArray(); - return builder; - } - - // package protected for testing - int getRequiredSize() { - return requiredSize; + InternalGeoGrid create(String name, int requiredSize, List buckets, List list, Map metaData) { + return new InternalGeoHashGrid(name, requiredSize, buckets, list, metaData); } @Override - protected int doHashCode() { - return Objects.hash(requiredSize, buckets); + Reader getBucketReader() { + return InternalGeoHashGridBucket::new; } @Override - protected boolean doEquals(Object obj) { - InternalGeoHashGrid other = (InternalGeoHashGrid) obj; - return Objects.equals(requiredSize, other.requiredSize) && - Objects.equals(buckets, other.buckets); - } - - static class BucketPriorityQueue extends PriorityQueue { - - BucketPriorityQueue(int size) { - super(size); - } - - @Override - protected boolean lessThan(GeoGridBucket o1, GeoGridBucket o2) { - int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); - if (cmp == 0) { - cmp = o2.compareTo(o1); - if (cmp == 0) { - cmp = System.identityHashCode(o2) - System.identityHashCode(o1); - } - } - return cmp > 0; - } + public String getWriteableName() { + return NAME; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java new file mode 100644 index 0000000000000..ed4df7c0d0d86 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoHashUtils; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.InternalAggregations; + +import java.io.IOException; + +public class InternalGeoHashGridBucket extends InternalGeoGridBucket { + InternalGeoHashGridBucket(long geohashAsLong, long docCount, InternalAggregations aggregations) { + super(geohashAsLong, docCount, aggregations); + } + + /** + * Read from a stream. + */ + public InternalGeoHashGridBucket(StreamInput in) throws IOException { + super(in); + } + + @Override + InternalGeoHashGridBucket buildBucket(InternalGeoGridBucket bucket, long geoHashAsLong, long docCount, + InternalAggregations aggregations) { + return new InternalGeoHashGridBucket(geoHashAsLong, docCount, aggregations); + } + + @Override + public String getKeyAsString() { + return GeoHashUtils.stringEncode(geohashAsLong); + } + + @Override + public GeoPoint getKey() { + return GeoPoint.fromGeohash(geohashAsLong); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java new file mode 100644 index 0000000000000..cd19dc84c21f6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGrid.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; + +import java.io.IOException; +import java.util.List; +import java.util.function.Supplier; + +public abstract class ParsedGeoGrid extends ParsedMultiBucketAggregation implements GeoGrid { + + @Override + public List getBuckets() { + return buckets; + } + + public static ObjectParser createParser(Supplier supplier, + CheckedFunction bucketParser, + CheckedFunction keyedBucketParser) { + ObjectParser parser = new ObjectParser<>(ParsedGeoGrid.class.getSimpleName(), true, supplier); + declareMultiBucketAggregationFields(parser, bucketParser, keyedBucketParser); + return parser; + } + + protected void setName(String name) { + super.setName(name); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java similarity index 59% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index ff3b21a3a7bae..493b77f547f75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -18,19 +18,18 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -/** - * Encapsulates relevant parameter defaults and validations for the geo hash grid aggregation. 
- */ -final class GeoHashGridParams { +import java.io.IOException; + +public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { - /* recognized field names in JSON */ - static final ParseField FIELD_PRECISION = new ParseField("precision"); - static final ParseField FIELD_SIZE = new ParseField("size"); - static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size"); + protected String geohashAsString; - private GeoHashGridParams() { - throw new AssertionError("No instances intended"); + @Override + protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + return builder.field(Aggregation.CommonFields.KEY.getPreferredName(), geohashAsString); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index 4551523e0fc8b..b9af237eb6323 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -19,60 +19,24 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import java.io.IOException; -import java.util.List; -public class ParsedGeoHashGrid extends ParsedMultiBucketAggregation implements GeoHashGrid { +public class ParsedGeoHashGrid extends ParsedGeoGrid { - @Override - public String getType() { - return GeoGridAggregationBuilder.NAME; - } - - @Override - public List getBuckets() { - return buckets; - } - - private static ObjectParser PARSER = - new ObjectParser<>(ParsedGeoHashGrid.class.getSimpleName(), true, ParsedGeoHashGrid::new); - static { - declareMultiBucketAggregationFields(PARSER, ParsedBucket::fromXContent, ParsedBucket::fromXContent); - } + private static ObjectParser PARSER = createParser(ParsedGeoHashGrid::new, + ParsedGeoHashGridBucket::fromXContent, ParsedGeoHashGridBucket::fromXContent); - public static ParsedGeoHashGrid fromXContent(XContentParser parser, String name) throws IOException { - ParsedGeoHashGrid aggregation = PARSER.parse(parser, null); + public static ParsedGeoGrid fromXContent(XContentParser parser, String name) throws IOException { + ParsedGeoGrid aggregation = PARSER.parse(parser, null); aggregation.setName(name); return aggregation; } - public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoHashGrid.Bucket { - - private String geohashAsString; - - @Override - public GeoPoint getKey() { - return GeoPoint.fromGeohash(geohashAsString); - } - - @Override - public String getKeyAsString() { - return geohashAsString; - } - - @Override - protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { - return builder.field(CommonFields.KEY.getPreferredName(), geohashAsString); - } - - static ParsedBucket fromXContent(XContentParser parser) throws IOException { - return parseXContent(parser, false, ParsedBucket::new, (p, bucket) -> bucket.geohashAsString = p.textOrNull()); - } + @Override + public String getType() { + return GeoHashGridAggregationBuilder.NAME; } } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java new file mode 100644 index 0000000000000..fe7846692a1f0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { + + @Override + public GeoPoint getKey() { + return GeoPoint.fromGeohash(geohashAsString); + } + + @Override + public String getKeyAsString() { + return geohashAsString; + } + + static ParsedGeoHashGridBucket fromXContent(XContentParser parser) throws IOException { + return parseXContent(parser, false, ParsedGeoHashGridBucket::new, (p, bucket) -> bucket.geohashAsString = p.textOrNull()); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java index c41fa29bde3db..a92cf117accf1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java @@ -22,7 +22,7 @@ import org.elasticsearch.search.aggregations.bucket.composite.InternalComposite; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoGrid; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; @@ -104,7 +104,7 @@ public static boolean hasValue(InternalFilter agg) { return agg.getDocCount() > 0; } - public static boolean hasValue(InternalGeoHashGrid agg) { + public static boolean hasValue(InternalGeoGrid agg) { return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index ef001b35feffb..874623132f36a 100644 --- 
a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.composite.InternalCompositeTests; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilterTests; import org.elasticsearch.search.aggregations.bucket.filter.InternalFiltersTests; -import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGridTests; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridTests; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobalTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; @@ -139,7 +139,7 @@ private static List> getAggsTests() { aggsTests.add(new InternalGlobalTests()); aggsTests.add(new InternalFilterTests()); aggsTests.add(new InternalSamplerTests()); - aggsTests.add(new InternalGeoHashGridTests()); + aggsTests.add(new GeoHashGridTests()); aggsTests.add(new InternalRangeTests()); aggsTests.add(new InternalDateRangeTests()); aggsTests.add(new InternalGeoDistanceTests()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index d264de22e2155..a77eb02c1a034 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -32,8 +32,8 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid.Bucket; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid.Bucket; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.VersionUtils; @@ -154,12 +154,12 @@ public void testSimple() throws Exception { assertSearchResponse(response); - GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid"); + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); List buckets = geoGrid.getBuckets(); Object[] propertiesKeys = (Object[]) ((InternalAggregation)geoGrid).getProperty("_key"); Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)geoGrid).getProperty("_count"); for (int i = 0; i < buckets.size(); i++) { - GeoHashGrid.Bucket cell = buckets.get(i); + GeoGrid.Bucket cell = buckets.get(i); String geohash = cell.getKeyAsString(); long bucketCount = cell.getDocCount(); @@ -185,8 +185,8 @@ public void testMultivalued() throws Exception { assertSearchResponse(response); - GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid"); - for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { String geohash = cell.getKeyAsString(); long bucketCount = cell.getDocCount(); @@ -217,8 +217,8 @@ public void testFiltered() throws Exception { Filter filter = response.getAggregations().get("filtered"); - GeoHashGrid geoGrid = 
filter.getAggregations().get("geohashgrid"); - for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) { + GeoGrid geoGrid = filter.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { String geohash = cell.getKeyAsString(); long bucketCount = cell.getDocCount(); int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); @@ -242,7 +242,7 @@ public void testUnmapped() throws Exception { assertSearchResponse(response); - GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid"); + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); assertThat(geoGrid.getBuckets().size(), equalTo(0)); } @@ -259,8 +259,8 @@ public void testPartiallyUnmapped() throws Exception { assertSearchResponse(response); - GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid"); - for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) { + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { String geohash = cell.getKeyAsString(); long bucketCount = cell.getDocCount(); @@ -285,10 +285,10 @@ public void testTopMatch() throws Exception { assertSearchResponse(response); - GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid"); + GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); //Check we only have one bucket with the best match for that resolution assertThat(geoGrid.getBuckets().size(), equalTo(1)); - for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) { + for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { String geohash = cell.getKeyAsString(); long bucketCount = cell.getDocCount(); int expectedBucketCount = 0; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java index fb7b3984cdf2b..e414b86403a09 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java @@ -21,13 +21,14 @@ import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; public class GeoHashGridTests extends BaseAggregationTestCase { @Override - protected GeoGridAggregationBuilder createTestAggregatorBuilder() { + protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { String name = randomAlphaOfLengthBetween(3, 20); - GeoGridAggregationBuilder factory = new GeoGridAggregationBuilder(name); + GeoHashGridAggregationBuilder factory = new GeoHashGridAggregationBuilder(name); if (randomBoolean()) { int precision = randomIntBetween(1, 12); factory.precision(precision); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java index 18ab80305dd15..664edba7db0d8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.filter.Filter; -import 
org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -301,7 +301,7 @@ public void testGeoHashGrid() throws Exception { assertSearchResponse(response); - GeoHashGrid grid = response.getAggregations().get("grid"); + GeoGrid grid = response.getAggregations().get("grid"); Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(4)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java index 2d270f8298ff1..ce4a065ef4c77 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java @@ -94,7 +94,7 @@ public void testWithSeveralDocs() throws IOException { } }, geoHashGrid -> { assertEquals(expectedCountPerGeoHash.size(), geoHashGrid.getBuckets().size()); - for (GeoHashGrid.Bucket bucket : geoHashGrid.getBuckets()) { + for (GeoGrid.Bucket bucket : geoHashGrid.getBuckets()) { assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount()); } assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid)); @@ -111,7 +111,7 @@ private void testCase(Query query, String field, int precision, CheckedConsumer< IndexReader indexReader = DirectoryReader.open(directory); IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - GeoGridAggregationBuilder aggregationBuilder = new GeoGridAggregationBuilder("_name").field(field); + GeoGridAggregationBuilder aggregationBuilder = new GeoHashGridAggregationBuilder("_name").field(field); aggregationBuilder.precision(precision); MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType(); fieldType.setHasDocValues(true); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index e431bf19ff3de..0afbc433952e9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -38,7 +38,7 @@ public void testParseValidFromInts() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); // can create a factory - assertNotNull(GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + assertNotNull(GeoHashGridAggregationBuilder.parse("geohash_grid", stParser)); } public void testParseValidFromStrings() throws Exception { @@ -48,7 +48,7 @@ public void testParseValidFromStrings() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); // can create a factory - assertNotNull(GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + assertNotNull(GeoHashGridAggregationBuilder.parse("geohash_grid", stParser)); } public void 
testParseDistanceUnitPrecision() throws Exception { @@ -63,7 +63,7 @@ public void testParseDistanceUnitPrecision() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); // can create a factory - GeoGridAggregationBuilder builder = GeoGridAggregationBuilder.parse("geohash_grid", stParser); + GeoGridAggregationBuilder builder = GeoHashGridAggregationBuilder.parse("geohash_grid", stParser); assertNotNull(builder); assertThat(builder.precision(), greaterThanOrEqualTo(0)); assertThat(builder.precision(), lessThanOrEqualTo(12)); @@ -75,7 +75,7 @@ public void testParseInvalidUnitPrecision() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); XContentParseException ex = expectThrows(XContentParseException.class, - () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + () -> GeoHashGridAggregationBuilder.parse("geohash_grid", stParser)); assertThat(ex.getMessage(), containsString("[geohash_grid] failed to parse field [precision]")); assertThat(ex.getCause(), instanceOf(NumberFormatException.class)); assertEquals("For input string: \"10kg\"", ex.getCause().getMessage()); @@ -87,7 +87,7 @@ public void testParseDistanceUnitPrecisionTooSmall() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); XContentParseException ex = expectThrows(XContentParseException.class, - () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + () -> GeoHashGridAggregationBuilder.parse("geohash_grid", stParser)); assertThat(ex.getMessage(), containsString("[geohash_grid] failed to parse field [precision]")); assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); assertEquals("precision too high [1cm]", ex.getCause().getMessage()); @@ -98,7 +98,7 @@ public void testParseErrorOnBooleanPrecision() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); XContentParseException e = expectThrows(XContentParseException.class, - () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + () -> GeoHashGridAggregationBuilder.parse("geohash_grid", stParser)); assertThat(ExceptionsHelper.detailedMessage(e), containsString("[geohash_grid] precision doesn't support values of type: VALUE_BOOLEAN")); } @@ -108,7 +108,7 @@ public void testParseErrorOnPrecisionOutOfRange() throws Exception { XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); try { - GeoGridAggregationBuilder.parse("geohash_grid", stParser); + GeoHashGridAggregationBuilder.parse("geohash_grid", stParser); fail(); } catch (XContentParseException ex) { assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java similarity index 75% rename from server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java index 78016833dbc0f..8c291e69fabe1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -31,7 +31,9 @@ import java.util.List; import java.util.Map; -public class InternalGeoHashGridTests extends InternalMultiBucketAggregationTestCase { +import static org.hamcrest.Matchers.equalTo; + +public class GeoHashGridTests extends InternalMultiBucketAggregationTestCase { @Override protected int minNumberOfBuckets() { @@ -49,13 +51,13 @@ protected InternalGeoHashGrid createTestInstance(String name, Map metaData, InternalAggregations aggregations) { int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); long geoHashAsLong = GeoHashUtils.longEncode(longitude, latitude, 4); - buckets.add(new GeoGridBucket(geoHashAsLong, randomInt(IndexWriter.MAX_DOCS), aggregations)); + buckets.add(new InternalGeoHashGridBucket(geoHashAsLong, randomInt(IndexWriter.MAX_DOCS), aggregations)); } return new InternalGeoHashGrid(name, size, buckets, pipelineAggregators, metaData); } @@ -67,24 +69,23 @@ protected Writeable.Reader instanceReader() { @Override protected void assertReduced(InternalGeoHashGrid reduced, List inputs) { - Map> map = new HashMap<>(); + Map> map = new HashMap<>(); for (InternalGeoHashGrid input : inputs) { - for (GeoHashGrid.Bucket bucket : input.getBuckets()) { - GeoGridBucket internalBucket = (GeoGridBucket) bucket; - List buckets = map.get(internalBucket.geohashAsLong); + for (InternalGeoGridBucket bucket : input.getBuckets()) { + List buckets = map.get(bucket.geohashAsLong); if (buckets == null) { - map.put(internalBucket.geohashAsLong, buckets = new ArrayList<>()); + map.put(bucket.geohashAsLong, buckets = new ArrayList<>()); } - buckets.add(internalBucket); + buckets.add(bucket); } } - List expectedBuckets = new ArrayList<>(); - for (Map.Entry> entry : map.entrySet()) { + List expectedBuckets = new ArrayList<>(); + for (Map.Entry> entry : map.entrySet()) { long docCount = 0; - for (GeoGridBucket bucket : entry.getValue()) { + for (InternalGeoGridBucket bucket : entry.getValue()) { docCount += bucket.docCount; } - expectedBuckets.add(new GeoGridBucket(entry.getKey(), docCount, InternalAggregations.EMPTY)); + expectedBuckets.add(new InternalGeoHashGridBucket(entry.getKey(), docCount, InternalAggregations.EMPTY)); } expectedBuckets.sort((first, second) -> { int cmp = Long.compare(second.docCount, first.docCount); @@ -97,8 +98,8 @@ protected void assertReduced(InternalGeoHashGrid reduced, List implementationClass() { protected InternalGeoHashGrid mutateInstance(InternalGeoHashGrid instance) { String name = instance.getName(); int size = instance.getRequiredSize(); - List buckets = instance.getBuckets(); + List buckets = instance.getBuckets(); List pipelineAggregators = instance.pipelineAggregators(); Map metaData = instance.getMetaData(); switch (between(0, 3)) { @@ -123,7 +124,7 @@ protected InternalGeoHashGrid mutateInstance(InternalGeoHashGrid instance) { case 1: buckets = new ArrayList<>(buckets); buckets.add( - new GeoGridBucket(randomNonNegativeLong(), randomInt(IndexWriter.MAX_DOCS), InternalAggregations.EMPTY)); + new InternalGeoHashGridBucket(randomNonNegativeLong(), randomInt(IndexWriter.MAX_DOCS), InternalAggregations.EMPTY)); break; case 2: size = size + between(1, 10); @@ -142,4 +143,8 @@ protected InternalGeoHashGrid mutateInstance(InternalGeoHashGrid instance) { 
return new InternalGeoHashGrid(name, size, buckets, pipelineAggregators, metaData); } + public void testCreateFromBuckets() { + InternalGeoHashGrid original = createTestInstance(); + assertThat(original, equalTo(original.create(original.buckets))); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java index dfc503219ed74..9e9af4e65066f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java @@ -22,7 +22,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGrid; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.test.ESIntegTestCase; @@ -159,11 +159,11 @@ public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { .get(); assertSearchResponse(response); - GeoHashGrid grid = response.getAggregations().get("geoGrid"); + GeoGrid grid = response.getAggregations().get("geoGrid"); assertThat(grid, notNullValue()); assertThat(grid.getName(), equalTo("geoGrid")); - List buckets = grid.getBuckets(); - for (GeoHashGrid.Bucket cell : buckets) { + List buckets = grid.getBuckets(); + for (GeoGrid.Bucket cell : buckets) { String geohash = cell.getKeyAsString(); GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); GeoCentroid centroidAgg = cell.getAggregations().get(aggName); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 551110ca2520a..fd560af806066 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -49,7 +49,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter; import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; @@ -211,7 +211,7 @@ public abstract class InternalAggregationTestCase map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c)); map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c)); map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c)); - map.put(GeoGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); + map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c)); map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c)); map.put(DateRangeAggregationBuilder.NAME, (p, c) -> 
ParsedDateRange.fromXContent(p, (String) c));
         map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));

From 497468400365264939ac1f841bf9e619b4d2070e Mon Sep 17 00:00:00 2001
From: Andrey Ershov
Date: Thu, 24 Jan 2019 19:25:55 +0100
Subject: [PATCH 15/64] Add tool elasticsearch-node unsafe-bootstrap (#37696)

The elasticsearch-node tool helps to restore a cluster when half or more
of the master-eligible nodes are lost. Of course, all bets are off
regarding data consistency.
There are two parts to the tool: unsafe-bootstrap, to be used when at
least one master-eligible node is still alive, and detach-cluster, for
when no master-eligible nodes are left. This commit implements the
first part.
Docs for the tool will be added separately as part of #37812.
---
 distribution/src/bin/elasticsearch-node       |   5 +
 distribution/src/bin/elasticsearch-node.bat   |  12 +
 .../packaging/test/ArchiveTestCase.java       |  20 +-
 .../packaging/util/Archives.java              |   3 +-
 .../packaging/util/Installation.java          |   1 +
 .../packaging/util/Packages.java              |   3 +-
 .../resources/packaging/utils/packages.bash   |   1 +
 .../test/resources/packaging/utils/tar.bash   |   1 +
 .../cli/CommandLoggingConfigurator.java       |   4 +-
 .../cluster/coordination/NodeToolCli.java     |  44 +++
 .../UnsafeBootstrapMasterCommand.java         | 195 +++++++++++++
 .../coordination/UnsafeBootstrapMasterIT.java | 262 ++++++++++++++++++
 .../org/elasticsearch/test/ESTestCase.java    |   8 +-
 13 files changed, 552 insertions(+), 7 deletions(-)
 create mode 100755 distribution/src/bin/elasticsearch-node
 create mode 100644 distribution/src/bin/elasticsearch-node.bat
 create mode 100644 server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java
 create mode 100644 server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
 create mode 100644 server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java

diff --git a/distribution/src/bin/elasticsearch-node b/distribution/src/bin/elasticsearch-node
new file mode 100755
index 0000000000000..29949486b5526
--- /dev/null
+++ b/distribution/src/bin/elasticsearch-node
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+ES_MAIN_CLASS=org.elasticsearch.cluster.coordination.NodeToolCli \
+  "`dirname "$0"`"/elasticsearch-cli \
+  "$@"
diff --git a/distribution/src/bin/elasticsearch-node.bat b/distribution/src/bin/elasticsearch-node.bat
new file mode 100644
index 0000000000000..264a357cb8af4
--- /dev/null
+++ b/distribution/src/bin/elasticsearch-node.bat
@@ -0,0 +1,12 @@
+@echo off
+
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+set ES_MAIN_CLASS=org.elasticsearch.cluster.coordination.NodeToolCli
+call "%~dp0elasticsearch-cli.bat" ^
+  %%* ^
+  || exit /b 1
+
+endlocal
+endlocal
diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java
index f298e7681ccf5..05b3603628adb 100644
--- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java
+++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java
@@ -301,7 +301,7 @@ public void test90SecurityCliPackaging() {
         }
     }
-    public void test100RepairIndexCliPackaging() {
+    public void test100ElasticsearchShardCliPackaging() {
         assumeThat(installation, is(notNullValue()));
         final Installation.Executables bin = installation.executables();
@@ -318,4 +318,22 @@ public void test100RepairIndexCliPackaging() {
         }
     }
+    public void
test110ElasticsearchNodeCliPackaging() { + assumeThat(installation, is(notNullValue())); + + final Installation.Executables bin = installation.executables(); + final Shell sh = new Shell(); + + Platforms.PlatformAction action = () -> { + final Result result = sh.run(bin.elasticsearchNode + " -h"); + assertThat(result.stdout, + containsString("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes")); + }; + + if (distribution().equals(Distribution.DEFAULT_TAR) || distribution().equals(Distribution.DEFAULT_ZIP)) { + Platforms.onLinux(action); + Platforms.onWindows(action); + } + } + } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java index cbee11ea41c5d..db48ed753b271 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -186,7 +186,8 @@ private static void verifyOssInstallation(Installation es, Distribution distribu "elasticsearch-env", "elasticsearch-keystore", "elasticsearch-plugin", - "elasticsearch-shard" + "elasticsearch-shard", + "elasticsearch-node" ).forEach(executable -> { assertThat(es.bin(executable), file(File, owner, owner, p755)); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java index 8dea694492bc2..41b4fb9755654 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java @@ -102,6 +102,7 @@ public class Executables { public final Path elasticsearchKeystore = platformExecutable("elasticsearch-keystore"); public final Path elasticsearchCertutil = platformExecutable("elasticsearch-certutil"); public final Path elasticsearchShard = platformExecutable("elasticsearch-shard"); + public final Path elasticsearchNode = platformExecutable("elasticsearch-node"); private Path platformExecutable(String name) { final String platformExecutableName = Platforms.WINDOWS diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index 2538cf6f3bc48..f2226bfb0c4e3 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -196,7 +196,8 @@ private static void verifyOssInstallation(Installation es, Distribution distribu "elasticsearch", "elasticsearch-plugin", "elasticsearch-keystore", - "elasticsearch-shard" + "elasticsearch-shard", + "elasticsearch-node" ).forEach(executable -> assertThat(es.bin(executable), file(File, "root", "root", p755))); Stream.of( diff --git a/qa/vagrant/src/test/resources/packaging/utils/packages.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash index f212ee015f1f6..d86f1c64e2e69 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/packages.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/packages.bash @@ -104,6 +104,7 @@ verify_package_installation() { assert_file "$ESHOME/bin/elasticsearch" f root root 755 assert_file "$ESHOME/bin/elasticsearch-plugin" f root root 755 assert_file "$ESHOME/bin/elasticsearch-shard" f root root 755 + assert_file "$ESHOME/bin/elasticsearch-node" f root root 755 assert_file "$ESHOME/lib" d root root 755 assert_file 
"$ESCONFIG" d root elasticsearch 2750 assert_file "$ESCONFIG/elasticsearch.keystore" f root elasticsearch 660 diff --git a/qa/vagrant/src/test/resources/packaging/utils/tar.bash b/qa/vagrant/src/test/resources/packaging/utils/tar.bash index 95e293df05423..eb2e39274e29e 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/tar.bash @@ -95,6 +95,7 @@ verify_archive_installation() { assert_file "$ESHOME/bin/elasticsearch-keystore" f elasticsearch elasticsearch 755 assert_file "$ESHOME/bin/elasticsearch-plugin" f elasticsearch elasticsearch 755 assert_file "$ESHOME/bin/elasticsearch-shard" f elasticsearch elasticsearch 755 + assert_file "$ESHOME/bin/elasticsearch-node" f elasticsearch elasticsearch 755 assert_file "$ESCONFIG" d elasticsearch elasticsearch 755 assert_file "$ESCONFIG/elasticsearch.yml" f elasticsearch elasticsearch 660 assert_file "$ESCONFIG/jvm.options" f elasticsearch elasticsearch 660 diff --git a/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java b/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java index 406c362dd724a..419b7c4f43a44 100644 --- a/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java +++ b/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java @@ -27,13 +27,13 @@ * Holder class for method to configure logging without Elasticsearch configuration files for use in CLI tools that will not read such * files. */ -final class CommandLoggingConfigurator { +public final class CommandLoggingConfigurator { /** * Configures logging without Elasticsearch configuration files based on the system property "es.logger.level" only. As such, any * logging will be written to the console. */ - static void configureLoggingWithoutConfig() { + public static void configureLoggingWithoutConfig() { // initialize default for es.logger.level because we will not read the log4j2.properties final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name()); final Settings settings = Settings.builder().put("logger.level", loggerLevel).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java new file mode 100644 index 0000000000000..d8fb77433faef --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.cluster.coordination;
+
+import org.elasticsearch.cli.CommandLoggingConfigurator;
+import org.elasticsearch.cli.MultiCommand;
+import org.elasticsearch.cli.Terminal;
+
+// NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging
+// initialization after the LoggingAwareCommand instance is constructed. That is too late for us:
+// log4j2 is initialized before UnsafeBootstrapMasterCommand is added to the list of subcommands,
+// because UnsafeBootstrapMasterCommand holds a static reference to the Logger class.
+// Even if we avoided that static reference, there would be no nice way to avoid declaring
+// UNSAFE_BOOTSTRAP, which depends on ClusterService, which in turn has a static Logger.
+// TODO: execute CommandLoggingConfigurator.configureLoggingWithoutConfig() in the constructor of commands, not in beforeMain
+public class NodeToolCli extends MultiCommand {
+
+    public NodeToolCli() {
+        super("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes", ()->{});
+        CommandLoggingConfigurator.configureLoggingWithoutConfig();
+        subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand());
+    }
+
+    public static void main(String[] args) throws Exception {
+        exit(new NodeToolCli().main(args, Terminal.DEFAULT));
+    }
+
+}
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
new file mode 100644
index 0000000000000..9db750c2a1f08
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionSet; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.LockObtainFailedException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.node.Node; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; +import java.util.Locale; +import java.util.Objects; + +public class UnsafeBootstrapMasterCommand extends EnvironmentAwareCommand { + + private static final Logger logger = LogManager.getLogger(UnsafeBootstrapMasterCommand.class); + private final NamedXContentRegistry namedXContentRegistry; + + static final String STOP_WARNING_MSG = + "--------------------------------------------------------------------------\n" + + "\n" + + " WARNING: Elasticsearch MUST be stopped before running this tool." + + "\n"; + static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = + "Current node cluster state (term, version) pair is (%s, %s)"; + static final String CONFIRMATION_MSG = + "--------------------------------------------------------------------------\n" + + "\n" + + "You should run this tool only if you have permanently lost half\n" + + "or more of the master-eligible nodes, and you cannot restore the cluster\n" + + "from a snapshot. 
This tool can result in arbitrary data loss and\n" +
+        "should be the last resort.\n" +
+        "If you have multiple surviving master-eligible nodes, consider running\n" +
+        "this tool on the node with the highest cluster state (term, version) pair.\n" +
+        "Do you want to proceed?\n";
+    static final String ABORTED_BY_USER_MSG = "aborted by user";
+    static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on a master-eligible node";
+    static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?";
+    static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?";
+    static final String NO_NODE_METADATA_FOUND_MSG = "no node meta data is found, node has not been started yet?";
+    static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, are you running a pre-7.0 Elasticsearch?";
+    static final String GLOBAL_GENERATION_MISSING_MSG = "no metadata is referenced from the manifest file, cluster has never been " +
+        "bootstrapped?";
+    static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?";
+    static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG =
+        "last committed voting configuration is empty, cluster has never been bootstrapped?";
+    static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk";
+    static final String MASTER_NODE_BOOTSTRAPPED_MSG = "Master node was successfully bootstrapped";
+    static final Setting<String> UNSAFE_BOOTSTRAP =
+        ClusterService.USER_DEFINED_META_DATA.getConcreteSetting("cluster.metadata.unsafe-bootstrap");
+
+    UnsafeBootstrapMasterCommand() {
+        super("Forces the successful election of the current node after the permanent loss of half or more master-eligible nodes");
+        namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables());
+    }
+
+    @Override
+    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
+        terminal.println(STOP_WARNING_MSG);
+
+        Settings settings = env.settings();
+        terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.master setting");
+        Boolean master = Node.NODE_MASTER_SETTING.get(settings);
+        if (master == false) {
+            throw new ElasticsearchException(NOT_MASTER_NODE_MSG);
+        }
+        final int nodeOrdinal = 0;
+
+        terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node");
+
+        try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) {
+            processNodePaths(logger, terminal, lock.getNodePaths());
+        } catch (LockObtainFailedException ex) {
+            throw new ElasticsearchException(
+                FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]");
+        }
+
+        terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG);
+    }
+
+    private void processNodePaths(Logger logger, Terminal terminal, NodeEnvironment.NodePath[] nodePaths) throws IOException {
+        final Path[] dataPaths =
+            Arrays.stream(nodePaths).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
+        if (dataPaths.length == 0) {
+            throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG);
+        }
+
+        terminal.println(Terminal.Verbosity.VERBOSE, "Loading node metadata");
+        final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths);
+        if (nodeMetaData == null) {
+            throw new ElasticsearchException(NO_NODE_METADATA_FOUND_MSG);
+        }
+
+        String nodeId = nodeMetaData.nodeId();
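+        // this nodeId is later written as the sole member of the new voting configuration
+        // that unsafe bootstrap constructs below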
terminal.println(Terminal.Verbosity.VERBOSE, "Current nodeId is " + nodeId); + terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest file"); + final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); + + if (manifest == null) { + throw new ElasticsearchException(NO_MANIFEST_FILE_FOUND_MSG); + } + if (manifest.isGlobalGenerationMissing()) { + throw new ElasticsearchException(GLOBAL_GENERATION_MISSING_MSG); + } + terminal.println(Terminal.Verbosity.VERBOSE, "Loading global metadata file"); + final MetaData metaData = MetaData.FORMAT.loadGeneration(logger, namedXContentRegistry, manifest.getGlobalGeneration(), + dataPaths); + if (metaData == null) { + throw new ElasticsearchException(NO_GLOBAL_METADATA_MSG + " [generation = " + manifest.getGlobalGeneration() + "]"); + } + final CoordinationMetaData coordinationMetaData = metaData.coordinationMetaData(); + if (coordinationMetaData == null || + coordinationMetaData.getLastCommittedConfiguration() == null || + coordinationMetaData.getLastCommittedConfiguration().isEmpty()) { + throw new ElasticsearchException(EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG); + } + terminal.println(String.format(Locale.ROOT, CLUSTER_STATE_TERM_VERSION_MSG_FORMAT, coordinationMetaData.term(), + metaData.version())); + + terminal.println(CONFIRMATION_MSG); + String text = terminal.readText("Confirm [y/N] "); + if (text.equalsIgnoreCase("y") == false) { + throw new ElasticsearchException(ABORTED_BY_USER_MSG); + } + + CoordinationMetaData newCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData) + .clearVotingConfigExclusions() + .lastAcceptedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) + .lastCommittedConfiguration(new CoordinationMetaData.VotingConfiguration(Collections.singleton(nodeId))) + .build(); + terminal.println(Terminal.Verbosity.VERBOSE, "New coordination metadata is constructed " + newCoordinationMetaData); + Settings persistentSettings = Settings.builder() + .put(metaData.persistentSettings()) + .put(UNSAFE_BOOTSTRAP.getKey(), true) + .build(); + MetaData newMetaData = MetaData.builder(metaData) + .persistentSettings(persistentSettings) + .coordinationMetaData(newCoordinationMetaData) + .build(); + writeNewMetaData(terminal, manifest, newMetaData, dataPaths); + } + + private void writeNewMetaData(Terminal terminal, Manifest manifest, MetaData newMetaData, Path[] dataPaths) { + try { + terminal.println(Terminal.Verbosity.VERBOSE, "Writing new global metadata to disk"); + long newGeneration = MetaData.FORMAT.write(newMetaData, dataPaths); + long newCurrentTerm = manifest.getCurrentTerm() + 1; + terminal.println(Terminal.Verbosity.VERBOSE, "Incrementing currentTerm. 
New value is " + newCurrentTerm); + Manifest newManifest = new Manifest(newCurrentTerm, manifest.getClusterStateVersion(), newGeneration, + manifest.getIndexGenerations()); + terminal.println(Terminal.Verbosity.VERBOSE, "Writing new manifest file to disk"); + Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); + terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up old metadata"); + MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); + } catch (Exception e) { + terminal.println(Terminal.Verbosity.VERBOSE, "Cleaning up new metadata"); + MetaData.FORMAT.cleanupOldFiles(manifest.getGlobalGeneration(), dataPaths); + throw new ElasticsearchException(WRITE_METADATA_EXCEPTION_MSG, e); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java new file mode 100644 index 0000000000000..73add5ba83520 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterIT.java @@ -0,0 +1,262 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ElectMasterService; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.NodeMetaData; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.containsString; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") +public class UnsafeBootstrapMasterIT extends ESIntegTestCase { + + private int bootstrapNodeId; + + @Before + public void resetBootstrapNodeId() { + bootstrapNodeId = -1; + } + + /** + * Performs cluster bootstrap when node with id bootstrapNodeId is started. 
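+     * Nodes started before that point are launched without any initial master-nodes configuration.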
+ * Any node of the batch could be selected as bootstrap target. + */ + @Override + protected List addExtraClusterBootstrapSettings(List allNodesSettings) { + if (internalCluster().size() + allNodesSettings.size() == bootstrapNodeId) { + List nodeNames = new ArrayList<>(); + Collections.addAll(nodeNames, internalCluster().getNodeNames()); + allNodesSettings.forEach(settings -> nodeNames.add(Node.NODE_NAME_SETTING.get(settings))); + + List newSettings = new ArrayList<>(); + int bootstrapIndex = randomInt(allNodesSettings.size() - 1); + for (int i = 0; i < allNodesSettings.size(); i++) { + Settings nodeSettings = allNodesSettings.get(i); + if (i == bootstrapIndex) { + newSettings.add(Settings.builder().put(nodeSettings) + .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) + .build()); + } else { + newSettings.add(nodeSettings); + } + } + + return newSettings; + } + return allNodesSettings; + } + + private MockTerminal executeCommand(Environment environment, boolean abort) throws Exception { + final UnsafeBootstrapMasterCommand command = new UnsafeBootstrapMasterCommand(); + final MockTerminal terminal = new MockTerminal(); + final OptionParser parser = new OptionParser(); + final OptionSet options = parser.parse(); + final String input; + + if (abort) { + input = randomValueOtherThanMany(c -> c.equalsIgnoreCase("y"), () -> randomAlphaOfLength(1)); + } else { + input = randomBoolean() ? "y" : "Y"; + } + + terminal.addTextInput(input); + + try { + command.execute(terminal, options, environment); + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); + } finally { + assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.STOP_WARNING_MSG)); + } + + return terminal; + } + + private MockTerminal executeCommand(Environment environment) throws Exception { + return executeCommand(environment, false); + } + + private void expectThrows(ThrowingRunnable runnable, String message) { + ElasticsearchException ex = expectThrows(ElasticsearchException.class, runnable); + assertThat(ex.getMessage(), containsString(message)); + } + + public void testNotMasterEligible() { + final Environment environment = TestEnvironment.newEnvironment(Settings.builder() + .put(internalCluster().getDefaultSettings()) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build()); + expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG); + } + + public void testNoDataFolder() { + final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_NODE_FOLDER_FOUND_MSG); + } + + public void testNodeLocked() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment ignored = new NodeEnvironment(envSettings, environment)) { + expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + } + } + + public void testNoNodeMetaData() throws IOException { + Settings envSettings = buildEnvSettings(Settings.EMPTY); + Environment environment = TestEnvironment.newEnvironment(envSettings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(envSettings, environment)) { + NodeMetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + } + + expectThrows(() -> executeCommand(environment), 
UnsafeBootstrapMasterCommand.NO_NODE_METADATA_FOUND_MSG); + } + + public void testNotBootstrappedCluster() throws Exception { + internalCluster().startNode( + Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE) + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup + .build()); + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().setLocal(true) + .execute().actionGet().getState(); + assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID)); + }); + + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.GLOBAL_GENERATION_MISSING_MSG); + } + + public void testNoManifestFile() throws IOException { + bootstrapNodeId = 1; + internalCluster().startNode(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE) + .build()); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_MANIFEST_FILE_FOUND_MSG); + } + + public void testNoMetaData() throws IOException { + bootstrapNodeId = 1; + internalCluster().startNode(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE) + .build()); + ensureStableCluster(1); + NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths()); + + expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.NO_GLOBAL_METADATA_MSG); + } + + public void testAbortedByUser() throws IOException { + bootstrapNodeId = 1; + internalCluster().startNode(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE) + .build()); + ensureStableCluster(1); + internalCluster().stopRandomDataNode(); + + Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + expectThrows(() -> executeCommand(environment, true), UnsafeBootstrapMasterCommand.ABORTED_BY_USER_MSG); + } + + public void test3MasterNodes2Failed() throws Exception { + bootstrapNodeId = 3; + List masterNodes = internalCluster().startMasterOnlyNodes(3, Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE) + .build()); + + String dataNode = internalCluster().startDataOnlyNode(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE) + .build()); + createIndex("test"); + + Client dataNodeClient = internalCluster().client(dataNode); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); + + assertBusy(() -> { + 
ClusterState state = dataNodeClient.admin().cluster().prepareState().setLocal(true)
+                .execute().actionGet().getState();
+            assertTrue(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID));
+        });
+
+        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        expectThrows(() -> executeCommand(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
+
+        NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0)));
+
+        MockTerminal terminal = executeCommand(environment);
+
+        MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths());
+        assertThat(terminal.getOutput(), containsString(
+            String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT,
+                metaData.coordinationMetaData().term(), metaData.version())));
+
+        internalCluster().startMasterOnlyNode(Settings.builder()
+            .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.MAX_VALUE)
+            .build());
+
+        assertBusy(() -> {
+            ClusterState state = dataNodeClient.admin().cluster().prepareState().setLocal(true)
+                .execute().actionGet().getState();
+            assertFalse(state.blocks().hasGlobalBlockWithId(DiscoverySettings.NO_MASTER_BLOCK_ID));
+            assertTrue(state.metaData().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false));
+        });
+
+        ensureGreen("test");
+    }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index 8cb35fa13db5d..61353d42ef178 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -928,11 +928,15 @@ public NodeEnvironment newNodeEnvironment() throws IOException {
         return newNodeEnvironment(Settings.EMPTY);
     }
-    public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
-        Settings build = Settings.builder()
+    public Settings buildEnvSettings(Settings settings) {
+        return Settings.builder()
             .put(settings)
             .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
             .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).build();
+    }
+
+    public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
+        Settings build = buildEnvSettings(settings);
         return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
     }

From 864e46551536605a8e76b401068791069912382a Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 24 Jan 2019 13:09:43 -0500
Subject: [PATCH 16/64] Adjust minRetainedSeqNo assertion in CombinedDeletionPolicyTests

In these tests, we initialize the retained_seq_no with NO_OPS_PERFORMED,
thus we should verify that the min of the retained_seq_no is at least
NO_OPS_PERFORMED, not 0.
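For illustration only (this snippet is not part of the change, and the
variable name is made up; SequenceNumbers.NO_OPS_PERFORMED is -1, so a
floor of 0 was too strict for an engine that has performed no operations):

    // lower bound of the assertion, floored at NO_OPS_PERFORMED rather than 0
    long expectedMinRetainedSeqNo = Math.max(NO_OPS_PERFORMED,
        Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps));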
Closes #35994 --- .../index/engine/CombinedDeletionPolicyTests.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index ff33ce19d484b..054bfb8bad695 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.singletonList; +import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.doAnswer; @@ -54,7 +55,7 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final int extraRetainedOps = between(0, 100); final SoftDeletesPolicy softDeletesPolicy = - new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps, Collections::emptyList); + new SoftDeletesPolicy(globalCheckpoint::get, NO_OPS_PERFORMED, extraRetainedOps, Collections::emptyList); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); @@ -91,8 +92,9 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { } assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(translogGenList.get(keptIndex))); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); - assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo( - Math.max(0, Math.min(getLocalCheckpoint(commitList.get(keptIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.max(NO_OPS_PERFORMED, + Math.min(getLocalCheckpoint(commitList.get(keptIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); } public void testAcquireIndexCommit() throws Exception { @@ -129,7 +131,7 @@ public void testAcquireIndexCommit() throws Exception { indexPolicy.onCommit(commitList); IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo( - Math.max(0, Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); + Math.max(NO_OPS_PERFORMED, Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); // Captures and releases some commits int captures = between(0, 5); for (int n = 0; n < captures; n++) { @@ -160,7 +162,8 @@ public void testAcquireIndexCommit() throws Exception { assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(Long.parseLong(commitList.get(commitList.size() - 1).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo( - Math.max(0, Math.min(getLocalCheckpoint(commitList.get(safeIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); + Math.max(NO_OPS_PERFORMED, + Math.min(getLocalCheckpoint(commitList.get(safeIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); } 
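// Commits captured above via acquireIndexCommit stay pinned; once they are released below, a later onCommit may delete them, and the retention bounds are re-checked against the new safe commit.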
snapshottingCommits.forEach(indexPolicy::releaseCommit); globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); @@ -174,7 +177,7 @@ public void testAcquireIndexCommit() throws Exception { assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo( - Math.max(0, Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); + Math.max(NO_OPS_PERFORMED, Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps)))); } public void testLegacyIndex() throws Exception { From af2f4c8f73673ecba663c6bfa52409d9790861ac Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 24 Jan 2019 12:15:05 +0100 Subject: [PATCH 17/64] enable bwc tests and bump versions after backporting https://github.com/elastic/elasticsearch/pull/37639 --- build.gradle | 4 ++-- .../test/resources/rest-api-spec/test/11_parent_child.yml | 6 ------ .../test/search.aggregation/200_top_hits_metric.yml | 3 --- .../rest-api-spec/test/search/300_sequence_numbers.yml | 8 -------- .../org/elasticsearch/index/query/InnerHitBuilder.java | 4 ++-- .../src/main/java/org/elasticsearch/search/SearchHit.java | 4 ++-- .../aggregations/metrics/TopHitsAggregationBuilder.java | 4 ++-- .../elasticsearch/search/builder/SearchSourceBuilder.java | 4 ++-- 8 files changed, 10 insertions(+), 27 deletions(-) diff --git a/build.gradle b/build.gradle index d4e2616c20263..c5611e8b453fb 100644 --- a/build.gradle +++ b/build.gradle @@ -159,8 +159,8 @@ task verifyVersions { * the enabled state of every bwc task. It should be set back to true * after the backport of the backcompat code is complete. 
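 * Note that bwc_tests_disabled_issue must name the blocking PR while the tests are
 * disabled; the guard below throws a GradleException if the flag is false and the
 * issue string is left empty.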
*/ -final boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "backporting https://github.com/elastic/elasticsearch/pull/37639" /* place a PR link here when committing bwc changes */ +final boolean bwc_tests_enabled = true +final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml index 61af4ab1acb59..d120504f18c6d 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml @@ -44,17 +44,11 @@ setup: --- "Parent/child inner hits with seq no": - - skip: - version: " - 6.99.99" - reason: support was added in 7.0 - - do: search: - rest_total_hits_as_int: true body: { "query" : { "has_child" : { "type" : "child", "query" : { "match_all" : {} }, "inner_hits" : { "seq_no_primary_term": true} } } } - - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } - match: { hits.hits.0._id: "1" } - match: { hits.hits.0.inner_hits.child.hits.hits.0._index: "test"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml index 775475e01a597..82207fe9b3995 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml @@ -87,9 +87,6 @@ setup: --- "top_hits aggregation with sequence numbers": - - skip: - version: " - 6.99.99" - reason: support was added in 7.0 - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml index 9e838d1c58f77..716740653111a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml @@ -24,10 +24,6 @@ setup: --- "sequence numbers are returned if requested from body": - - skip: - version: " - 6.99.99" - reason: sequence numbers were added in 7.0.0 - - do: search: index: _all @@ -43,10 +39,6 @@ setup: --- "sequence numbers are returned if requested from url": - - skip: - version: " - 6.99.99" - reason: sequence numbers were added in 7.0.0 - - do: search: index: _all diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index f5be9650b8d5c..d72134198b8e8 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -156,7 +156,7 @@ public InnerHitBuilder(StreamInput in) throws IOException { size = in.readVInt(); explain = in.readBoolean(); version = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_0_0)){ + if (in.getVersion().onOrAfter(Version.V_6_7_0)){ seqNoAndPrimaryTerm = in.readBoolean(); } else { seqNoAndPrimaryTerm = false; @@ -205,7 +205,7 @@ public void writeTo(StreamOutput out) 
throws IOException { out.writeVInt(size); out.writeBoolean(explain); out.writeBoolean(version); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeBoolean(seqNoAndPrimaryTerm); } out.writeBoolean(trackScores); diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 42f96e52fb119..df82bbec59900 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -784,7 +784,7 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalText(); nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); version = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { seqNo = in.readZLong(); primaryTerm = in.readVLong(); } @@ -856,7 +856,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalText(type); out.writeOptionalWriteable(nestedIdentity); out.writeLong(version); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeZLong(seqNo); out.writeVLong(primaryTerm); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index ba51099d6bc00..0d9a05c46713b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -140,7 +140,7 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException { } trackScores = in.readBoolean(); version = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { seqNoAndPrimaryTerm = in.readBoolean(); } } @@ -179,7 +179,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeBoolean(trackScores); out.writeBoolean(version); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeBoolean(seqNoAndPrimaryTerm); } } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 8e072d36c2c33..81dd84ad8e48b 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -250,7 +250,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { timeout = in.readOptionalTimeValue(); trackScores = in.readBoolean(); version = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { seqNoAndPrimaryTerm = in.readOptionalBoolean(); } else { seqNoAndPrimaryTerm = null; @@ -318,7 +318,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(timeout); out.writeBoolean(trackScores); out.writeOptionalBoolean(version); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeOptionalBoolean(seqNoAndPrimaryTerm); } out.writeNamedWriteableList(extBuilders); From e1d8df4ffae3d25a8ddd73a04234d5887cda45c0 Mon Sep 17 00:00:00 2001 From: 
Julie Tibshirani Date: Thu, 24 Jan 2019 13:17:47 -0800 Subject: [PATCH 18/64] Deprecate types in create index requests. (#37134) From #29453 and #37285, the include_type_name parameter was already present and defaulted to false. This PR makes the following updates: * Add deprecation warnings to RestCreateIndexAction, plus tests in RestCreateIndexActionTests. * Add a typeless 'create index' method to the Java HLRC, and deprecate the old typed version. To do this cleanly, I created new CreateIndexRequest and CreateIndexResponse objects that differ from the existing server ones. --- .../elasticsearch/client/IndicesClient.java | 58 ++- .../client/IndicesRequestConverters.java | 17 +- .../client/indices/CreateIndexRequest.java | 364 ++++++++++++++++++ .../client/indices/CreateIndexResponse.java | 74 ++++ .../java/org/elasticsearch/client/CCRIT.java | 4 +- .../client/ESRestHighLevelClientTestCase.java | 2 +- .../elasticsearch/client/IndicesClientIT.java | 83 +++- .../client/IndicesRequestConvertersTests.java | 38 +- .../client/MachineLearningIT.java | 59 ++- .../documentation/CCRDocumentationIT.java | 4 +- .../documentation/CRUDDocumentationIT.java | 22 +- .../ClusterClientDocumentationIT.java | 2 +- .../documentation/ILMDocumentationIT.java | 18 +- .../IndicesClientDocumentationIT.java | 47 +-- .../MlClientDocumentationIT.java | 36 +- .../documentation/SearchDocumentationIT.java | 32 +- .../SnapshotClientDocumentationIT.java | 2 +- .../indices/CreateIndexRequestTests.java | 93 +++++ .../indices/PutMappingRequestTests.java | 1 - .../indices/RandomCreateIndexGenerator.java | 61 +++ .../high-level/indices/create_index.asciidoc | 7 - .../admin/indices/RestCreateIndexAction.java | 12 + .../mapping/put/PutMappingRequestTests.java | 2 +- .../indices/RestCreateIndexActionTests.java | 62 +++ .../index/RandomCreateIndexGenerator.java | 20 +- .../test/AbstractXContentTestCase.java | 2 +- 26 files changed, 1009 insertions(+), 113 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexResponse.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CreateIndexRequestTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/indices/RandomCreateIndexGenerator.java create mode 100644 server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexActionTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index bed7d30242801..24c0175b7e884 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -27,8 +27,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; @@ 
-59,6 +57,8 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; @@ -120,9 +120,10 @@ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions op * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response */ - public CreateIndexResponse create(CreateIndexRequest createIndexRequest, RequestOptions options) throws IOException { + public CreateIndexResponse create(CreateIndexRequest createIndexRequest, + RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, - CreateIndexResponse::fromXContent, emptySet()); + CreateIndexResponse::fromXContent, emptySet()); } /** @@ -133,9 +134,54 @@ public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion */ - public void createAsync(CreateIndexRequest createIndexRequest, RequestOptions options, ActionListener listener) { + public void createAsync(CreateIndexRequest createIndexRequest, + RequestOptions options, + ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, - CreateIndexResponse::fromXContent, listener, emptySet()); + CreateIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Creates an index using the Create Index API. + * See + * Create Index API on elastic.co + * @param createIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + * + * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The + * method {@link #create(CreateIndexRequest, RequestOptions)} should be used instead, which accepts a new + * request object. + */ + @Deprecated + public org.elasticsearch.action.admin.indices.create.CreateIndexResponse create( + org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, + IndicesRequestConverters::createIndex, options, + org.elasticsearch.action.admin.indices.create.CreateIndexResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously creates an index using the Create Index API. + * See + * Create Index API on elastic.co + * @param createIndexRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * + * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The + * method {@link #createAsync(CreateIndexRequest, RequestOptions, ActionListener)} should be used instead, + * which accepts a new request object. + */ + @Deprecated + public void createAsync(org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, + IndicesRequestConverters::createIndex, options, + org.elasticsearch.action.admin.indices.create.CreateIndexResponse::fromXContent, listener, emptySet()); } /** diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index ece8bdffa2566..2b44e3006b1be 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; @@ -47,6 +46,7 @@ import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; @@ -98,6 +98,21 @@ static Request closeIndex(CloseIndexRequest closeIndexRequest) { } static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPart(createIndexRequest.index()).build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withTimeout(createIndexRequest.timeout()); + parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); + parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); + + request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request createIndex(org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest) + throws IOException { String endpoint = RequestConverters.endpoint(createIndexRequest.indices()); Request request = new Request(HttpPut.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexRequest.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexRequest.java new file mode 100644 index 0000000000000..f0bff6e6f4307 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexRequest.java @@ -0,0 +1,364 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; + +/** + * A request to create an index. + */ +public class CreateIndexRequest extends TimedRequest implements Validatable, ToXContentObject { + static final ParseField MAPPINGS = new ParseField("mappings"); + static final ParseField SETTINGS = new ParseField("settings"); + static final ParseField ALIASES = new ParseField("aliases"); + + private final String index; + private Settings settings = EMPTY_SETTINGS; + + private BytesReference mappings; + private XContentType mappingsXContentType; + + private final Set aliases = new HashSet<>(); + + private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + + /** + * Constructs a new request to create an index with the specified name. + */ + public CreateIndexRequest(String index) { + if (index == null) { + throw new IllegalArgumentException("The index name cannot be null."); + } + this.index = index; + } + + /** + * The name of the index to create. + */ + public String index() { + return index; + } + + /** + * The settings to create the index with. 
+ */ + public Settings settings() { + return settings; + } + + /** + * The settings to create the index with. + */ + public CreateIndexRequest settings(Settings.Builder settings) { + this.settings = settings.build(); + return this; + } + + /** + * The settings to create the index with. + */ + public CreateIndexRequest settings(Settings settings) { + this.settings = settings; + return this; + } + + /** + * The settings to create the index with (either json or yaml format) + */ + public CreateIndexRequest settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + + /** + * Allows to set the settings using a json builder. + */ + public CreateIndexRequest settings(XContentBuilder builder) { + settings(Strings.toString(builder), builder.contentType()); + return this; + } + + /** + * The settings to create the index with (either json/yaml/properties format) + */ + public CreateIndexRequest settings(Map source) { + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(source); + settings(Strings.toString(builder), XContentType.JSON); + } catch (IOException e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + return this; + } + + public BytesReference mappings() { + return mappings; + } + + public XContentType mappingsXContentType() { + return mappingsXContentType; + } + + /** + * Adds mapping that will be added when the index gets created. + * + * Note that the definition should *not* be nested under a type name. + * + * @param source The mapping source + * @param xContentType The content type of the source + */ + public CreateIndexRequest mapping(String source, XContentType xContentType) { + return mapping(new BytesArray(source), xContentType); + } + + /** + * Adds mapping that will be added when the index gets created. + * + * Note that the definition should *not* be nested under a type name. + * + * @param source The mapping source + */ + public CreateIndexRequest mapping(XContentBuilder source) { + return mapping(BytesReference.bytes(source), source.contentType()); + } + + /** + * Adds mapping that will be added when the index gets created. + * + * Note that the definition should *not* be nested under a type name. + * + * @param source The mapping source + */ + public CreateIndexRequest mapping(Map source) { + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(source); + return mapping(BytesReference.bytes(builder), builder.contentType()); + } catch (IOException e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + } + + /** + * Adds mapping that will be added when the index gets created. + * + * Note that the definition should *not* be nested under a type name. 
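+ * For example, a valid mapping source is { "properties": { "message": { "type": "text" } } }, with no type name wrapping "properties".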
+ * + * @param source The mapping source + * @param xContentType the content type of the mapping source + */ + public CreateIndexRequest mapping(BytesReference source, XContentType xContentType) { + Objects.requireNonNull(xContentType); + mappings = source; + mappingsXContentType = xContentType; + return this; + } + + public Set aliases() { + return this.aliases; + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public CreateIndexRequest aliases(Map source) { + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.map(source); + return aliases(BytesReference.bytes(builder), builder.contentType()); + } catch (IOException e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public CreateIndexRequest aliases(XContentBuilder source) { + return aliases(BytesReference.bytes(source), source.contentType()); + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public CreateIndexRequest aliases(String source, XContentType contentType) { + return aliases(new BytesArray(source), contentType); + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public CreateIndexRequest aliases(BytesReference source, XContentType contentType) { + // EMPTY is safe here because we never call namedObject + try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, source, contentType)) { + //move to the first alias + parser.nextToken(); + while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { + alias(Alias.fromXContent(parser)); + } + return this; + } catch(IOException e) { + throw new ElasticsearchParseException("Failed to parse aliases", e); + } + } + + /** + * Adds an alias that will be associated with the index when it gets created + */ + public CreateIndexRequest alias(Alias alias) { + this.aliases.add(alias); + return this; + } + + /** + * Adds aliases that will be associated with the index when it gets created + */ + public CreateIndexRequest aliases(Collection aliases) { + this.aliases.addAll(aliases); + return this; + } + + /** + * Sets the settings and mappings as a single source. + * + * Note that the mapping definition should *not* be nested under a type name. + */ + public CreateIndexRequest source(String source, XContentType xContentType) { + return source(new BytesArray(source), xContentType); + } + + /** + * Sets the settings and mappings as a single source. + * + * Note that the mapping definition should *not* be nested under a type name. + */ + public CreateIndexRequest source(XContentBuilder source) { + return source(BytesReference.bytes(source), source.contentType()); + } + + /** + * Sets the settings and mappings as a single source. + * + * Note that the mapping definition should *not* be nested under a type name. + */ + public CreateIndexRequest source(BytesReference source, XContentType xContentType) { + Objects.requireNonNull(xContentType); + source(XContentHelper.convertToMap(source, false, xContentType).v2()); + return this; + } + + /** + * Sets the settings and mappings as a single source. + * + * Note that the mapping definition should *not* be nested under a type name. 
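+ * The combined source may contain "settings", "mappings" and "aliases" sections, e.g. { "settings": { "number_of_shards": 1 }, "mappings": { "properties": { ... } } }.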
+ */ + @SuppressWarnings("unchecked") + public CreateIndexRequest source(Map source) { + DeprecationHandler deprecationHandler = DeprecationHandler.THROW_UNSUPPORTED_OPERATION; + for (Map.Entry entry : source.entrySet()) { + String name = entry.getKey(); + if (SETTINGS.match(name, deprecationHandler)) { + settings((Map) entry.getValue()); + } else if (MAPPINGS.match(name, deprecationHandler)) { + mapping((Map) entry.getValue()); + } else if (ALIASES.match(name, deprecationHandler)) { + aliases((Map) entry.getValue()); + } + } + return this; + } + + public ActiveShardCount waitForActiveShards() { + return waitForActiveShards; + } + + /** + * Sets the number of shard copies that should be active for index creation to return. + * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy + * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to + * wait for all shards (primary and all replicas) to be active before returning. + * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any + * non-negative integer, up to the number of copies per shard (number of replicas + 1), + * to wait for the desired amount of shard copies to become active before returning. + * Index creation will only wait up until the timeout value for the number of shard copies + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to + * determine if the requisite shard copies were all started before returning or timing out. + * + * @param waitForActiveShards number of active shard copies to wait on + */ + public CreateIndexRequest waitForActiveShards(ActiveShardCount waitForActiveShards) { + this.waitForActiveShards = waitForActiveShards; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject(SETTINGS.getPreferredName()); + settings.toXContent(builder, params); + builder.endObject(); + + if (mappings != null) { + try (InputStream stream = mappings.streamInput()) { + builder.rawField(MAPPINGS.getPreferredName(), stream, mappingsXContentType); + } + } + + builder.startObject(ALIASES.getPreferredName()); + for (Alias alias : aliases) { + alias.toXContent(builder, params); + } + builder.endObject(); + + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexResponse.java new file mode 100644 index 0000000000000..9755edd680f53 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/CreateIndexResponse.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A response for a create index action. + */ +public class CreateIndexResponse extends ShardsAcknowledgedResponse { + + private static final ParseField INDEX = new ParseField("index"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("create_index", + true, args -> new CreateIndexResponse((boolean) args[0], (boolean) args[1], (String) args[2])); + + static { + declareAcknowledgedAndShardsAcknowledgedFields(PARSER); + PARSER.declareField(constructorArg(), (parser, context) -> parser.textOrNull(), INDEX, ObjectParser.ValueType.STRING_OR_NULL); + } + + private String index; + + public CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { + super(acknowledged, shardsAcknowledged); + this.index = index; + } + + public String index() { + return index; + } + + public static CreateIndexResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + CreateIndexResponse that = (CreateIndexResponse) o; + return Objects.equals(index, that.index); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), index); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java index 90799522372dc..97a379aa16a90 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java @@ -24,8 +24,6 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -46,6 +44,8 @@ import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index efe94596b81b6..976ae754d335f 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -21,11 +21,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 72bc366e7a9c9..fe175b217bd5e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -33,8 +33,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; @@ -43,8 +41,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.client.indices.GetFieldMappingsRequest; -import org.elasticsearch.client.indices.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -70,7 +66,11 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; +import org.elasticsearch.client.indices.GetFieldMappingsResponse; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; @@ -93,6 +93,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.admin.indices.RestCreateIndexAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import 
org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; @@ -212,7 +213,7 @@ public void testCreateIndex() throws IOException { mappingBuilder.startObject().startObject("properties").startObject("field"); mappingBuilder.field("type", "text"); mappingBuilder.endObject().endObject().endObject(); - createIndexRequest.mapping(MapperService.SINGLE_MAPPING_NAME, mappingBuilder); + createIndexRequest.mapping(mappingBuilder); CreateIndexResponse createIndexResponse = execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); @@ -233,6 +234,70 @@ public void testCreateIndex() throws IOException { } } + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testCreateIndexWithTypes() throws IOException { + { + // Create index + String indexName = "plain_index"; + assertFalse(indexExists(indexName)); + + org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest = + new org.elasticsearch.action.admin.indices.create.CreateIndexRequest(indexName); + + org.elasticsearch.action.admin.indices.create.CreateIndexResponse createIndexResponse = execute( + createIndexRequest, + highLevelClient().indices()::create, + highLevelClient().indices()::createAsync, + expectWarnings(RestCreateIndexAction.TYPES_DEPRECATION_MESSAGE)); + assertTrue(createIndexResponse.isAcknowledged()); + + assertTrue(indexExists(indexName)); + } + { + // Create index with mappings, aliases and settings + String indexName = "rich_index"; + assertFalse(indexExists(indexName)); + + org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest = + new org.elasticsearch.action.admin.indices.create.CreateIndexRequest(indexName); + + Alias alias = new Alias("alias_name"); + alias.filter("{\"term\":{\"year\":2016}}"); + alias.routing("1"); + createIndexRequest.alias(alias); + + Settings.Builder settings = Settings.builder(); + settings.put(SETTING_NUMBER_OF_REPLICAS, 2); + createIndexRequest.settings(settings); + + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject().startObject("properties").startObject("field"); + mappingBuilder.field("type", "text"); + mappingBuilder.endObject().endObject().endObject(); + createIndexRequest.mapping(MapperService.SINGLE_MAPPING_NAME, mappingBuilder); + + org.elasticsearch.action.admin.indices.create.CreateIndexResponse createIndexResponse = execute( + createIndexRequest, + highLevelClient().indices()::create, + highLevelClient().indices()::createAsync, + expectWarnings(RestCreateIndexAction.TYPES_DEPRECATION_MESSAGE)); + assertTrue(createIndexResponse.isAcknowledged()); + + Map getIndexResponse = getAsMap(indexName); + assertEquals("2", XContentMapValues.extractValue(indexName + ".settings.index.number_of_replicas", getIndexResponse)); + + Map aliasData = + (Map)XContentMapValues.extractValue(indexName + ".aliases.alias_name", getIndexResponse); + assertNotNull(aliasData); + assertEquals("1", aliasData.get("index_routing")); + Map filter = (Map) aliasData.get("filter"); + Map term = (Map) filter.get("term"); + assertEquals(2016, term.get("year")); + + assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.properties.field.type", getIndexResponse)); + } + } + public void testGetSettings() throws IOException { String indexName = "get_settings_index"; Settings basicSettings = Settings.builder() @@ -915,7 +980,9 @@ public void testShrink() throws IOException { .put("index.number_of_replicas", 0) .putNull("index.routing.allocation.require._name") 
.build(); - resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); + resizeRequest.setTargetIndex(new org.elasticsearch.action.admin.indices.create.CreateIndexRequest("target") + .settings(targetSettings) + .alias(new Alias("alias"))); ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::shrink, highLevelClient().indices()::shrinkAsync); assertTrue(resizeResponse.isAcknowledged()); @@ -938,7 +1005,9 @@ public void testSplit() throws IOException { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); Settings targetSettings = Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build(); - resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); + resizeRequest.setTargetIndex(new org.elasticsearch.action.admin.indices.create.CreateIndexRequest("target") + .settings(targetSettings) + .alias(new Alias("alias"))); ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::split, highLevelClient().indices()::splitAsync); assertTrue(resizeResponse.isAcknowledged()); assertTrue(resizeResponse.isShardsAcknowledged()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index 3e4dcd0209764..5e08381720ec7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; @@ -50,6 +49,8 @@ import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.RandomCreateIndexGenerator; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; @@ -59,7 +60,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.ESTestCase; import org.junit.Assert; @@ -75,7 +75,6 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases; -import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest; import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings; import static 
org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; @@ -127,7 +126,23 @@ public void testIndicesExistEmptyIndices() { } public void testCreateIndex() throws IOException { - CreateIndexRequest createIndexRequest = randomCreateIndexRequest(); + CreateIndexRequest createIndexRequest = RandomCreateIndexGenerator.randomCreateIndexRequest(); + + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomTimeout(createIndexRequest, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + RequestConvertersTests.setRandomMasterTimeout(createIndexRequest, expectedParams); + RequestConvertersTests.setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams); + + Request request = IndicesRequestConverters.createIndex(createIndexRequest); + Assert.assertEquals("/" + createIndexRequest.index(), request.getEndpoint()); + Assert.assertEquals(expectedParams, request.getParameters()); + Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + RequestConvertersTests.assertToXContentBody(createIndexRequest, request.getEntity()); + } + + public void testCreateIndexWithTypes() throws IOException { + org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest = + org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest(); Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); @@ -143,8 +158,8 @@ public void testCreateIndex() throws IOException { } public void testCreateIndexNullIndex() { - ActionRequestValidationException validationException = new CreateIndexRequest(null).validate(); - Assert.assertNotNull(validationException); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new CreateIndexRequest(null)); + assertEquals(e.getMessage(), "The index name cannot be null."); } public void testUpdateAliases() throws IOException { @@ -754,7 +769,8 @@ private void resizeTest(ResizeType resizeType, CheckedFunction + request.mapping(// <1> "{\n" + - " \"_doc\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + " }\n" + " }\n" + "}", // <2> @@ -325,7 +324,7 @@ public void testCreateIndex() throws IOException { Map mapping = new HashMap<>(); mapping.put("properties", properties); jsonMap.put("_doc", mapping); - request.mapping("_doc", jsonMap); // <1> + request.mapping(jsonMap); // <1> //end::create-index-mappings-map CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); @@ -351,19 +350,11 @@ public void testCreateIndex() throws IOException { builder.endObject(); } builder.endObject(); - request.mapping("_doc", builder); // <1> + request.mapping(builder); // <1> //end::create-index-mappings-xcontent CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } - { - request = new CreateIndexRequest("twitter4"); - //tag::create-index-mappings-shortcut - request.mapping("_doc", "message", "type=text"); // <1> - //end::create-index-mappings-shortcut - CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); - 
assertTrue(createIndexResponse.isAcknowledged()); - } request = new CreateIndexRequest("twitter5"); // tag::create-index-request-aliases @@ -371,15 +362,13 @@ public void testCreateIndex() throws IOException { // end::create-index-request-aliases // tag::create-index-request-timeout - request.timeout(TimeValue.timeValueMinutes(2)); // <1> - request.timeout("2m"); // <2> + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> // end::create-index-request-timeout // tag::create-index-request-masterTimeout - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> - request.masterNodeTimeout("1m"); // <2> + request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::create-index-request-masterTimeout // tag::create-index-request-waitForActiveShards - request.waitForActiveShards(2); // <1> + request.waitForActiveShards(ActiveShardCount.from(2)); // <1> request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2> // end::create-index-request-waitForActiveShards { @@ -1118,7 +1107,7 @@ public void testGetSettings() throws Exception { { Settings settings = Settings.builder().put("number_of_shards", 3).build(); CreateIndexResponse createIndexResponse = client.indices().create( - new CreateIndexRequest("index", settings), RequestOptions.DEFAULT); + new CreateIndexRequest("index").settings(settings), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1182,7 +1171,7 @@ public void testGetSettingsWithDefaults() throws Exception { { Settings settings = Settings.builder().put("number_of_shards", 3).build(); CreateIndexResponse createIndexResponse = client.indices().create( - new CreateIndexRequest("index", settings), RequestOptions.DEFAULT); + new CreateIndexRequest("index").settings(settings), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1233,9 +1222,11 @@ public void testGetIndex() throws Exception { { Settings settings = Settings.builder().put("number_of_shards", 3).build(); String mappings = "{\"properties\":{\"field-1\":{\"type\":\"integer\"}}}"; + CreateIndexRequest createIndexRequest = new CreateIndexRequest("index") + .settings(settings) + .mapping(mappings, XContentType.JSON); CreateIndexResponse createIndexResponse = client.indices().create( - new CreateIndexRequest("index", settings).mapping("_doc", mappings, XContentType.JSON), - RequestOptions.DEFAULT); + createIndexRequest, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index eb74421c7a1c6..68881206b487f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; @@ -33,6 +32,7 @@ import org.elasticsearch.client.MlTestStateCleaner; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.CreateIndexRequest; import 
org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; import org.elasticsearch.client.ml.DeleteCalendarEventRequest; @@ -139,6 +139,7 @@ import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -869,7 +870,16 @@ public void testPreviewDatafeed() throws Exception { String datafeedId = job.getId() + "-feed"; String indexName = "preview_data_2"; CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); - createIndexRequest.mapping("_doc", "timestamp", "type=date", "total", "type=long"); + createIndexRequest.mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("total") + .field("type", "long") + .endObject() + .endObject() + .endObject()); highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT); DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, job.getId()) .setIndices(indexName) @@ -928,7 +938,16 @@ public void testStartDatafeed() throws Exception { String datafeedId = job.getId() + "-feed"; String indexName = "start_data_2"; CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); - createIndexRequest.mapping("_doc", "timestamp", "type=date", "total", "type=long"); + createIndexRequest.mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("total") + .field("type", "long") + .endObject() + .endObject() + .endObject()); highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT); DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, job.getId()) .setIndices(indexName) @@ -1048,7 +1067,16 @@ public void testGetDatafeedStats() throws Exception { String datafeedId1 = job.getId() + "-feed"; String indexName = "datafeed_stats_data_2"; CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); - createIndexRequest.mapping("_doc", "timestamp", "type=date", "total", "type=long"); + createIndexRequest.mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("total") + .field("type", "long") + .endObject() + .endObject() + .endObject()); highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT); DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId1, job.getId()) .setIndices(indexName) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 3a49a13479e87..ff5deb5cbdfcc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -23,8 +23,6 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; -import 
org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.explain.ExplainRequest; @@ -52,11 +50,14 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.core.CountResponse; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -1249,12 +1250,26 @@ public void onFailure(Exception e) { private void indexSearchTestData() throws IOException { CreateIndexRequest authorsRequest = new CreateIndexRequest("authors") - .mapping("_doc", "user", "type=keyword,doc_values=false"); + .mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("user") + .field("type", "keyword") + .field("doc_values", "false") + .endObject() + .endObject() + .endObject()); CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest, RequestOptions.DEFAULT); assertTrue(authorsResponse.isAcknowledged()); CreateIndexRequest reviewersRequest = new CreateIndexRequest("contributors") - .mapping("_doc", "user", "type=keyword,store=true"); + .mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("user") + .field("type", "keyword") + .field("store", "true") + .endObject() + .endObject() + .endObject()); CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest, RequestOptions.DEFAULT); assertTrue(reviewersResponse.isAcknowledged()); @@ -1368,7 +1383,14 @@ public void onFailure(Exception e) { private static void indexCountTestData() throws IOException { CreateIndexRequest authorsRequest = new CreateIndexRequest("author") - .mapping("_doc", "user", "type=keyword,doc_values=false"); + .mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("user") + .field("type", "keyword") + .field("doc_values", "false") + .endObject() + .endObject() + .endObject()); CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest, RequestOptions.DEFAULT); assertTrue(authorsResponse.isAcknowledged()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 22ef30c92b78c..d80c24be6618a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -38,7 +38,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import 
org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -46,6 +45,7 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CreateIndexRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CreateIndexRequestTests.java new file mode 100644 index 0000000000000..374f024401155 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/CreateIndexRequestTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Set; +import java.util.function.Predicate; + +import static org.elasticsearch.client.indices.CreateIndexRequest.ALIASES; +import static org.elasticsearch.client.indices.CreateIndexRequest.MAPPINGS; +import static org.elasticsearch.client.indices.CreateIndexRequest.SETTINGS; + +public class CreateIndexRequestTests extends AbstractXContentTestCase { + + @Override + protected CreateIndexRequest createTestInstance() { + return RandomCreateIndexGenerator.randomCreateIndexRequest(); + } + + @Override + protected CreateIndexRequest doParseInstance(XContentParser parser) throws IOException { + return new CreateIndexRequest("index").source(parser.map()); + } + + @Override + protected void assertEqualInstances(CreateIndexRequest expected, CreateIndexRequest actual) { + assertEquals(expected.settings(), actual.settings()); + assertAliasesEqual(expected.aliases(), actual.aliases()); + assertMappingsEqual(expected, actual); + } + + private void assertMappingsEqual(CreateIndexRequest expected, CreateIndexRequest actual) { + if (expected.mappings() == null) { + assertNull(actual.mappings()); + } else { + assertNotNull(actual.mappings()); + try (XContentParser expectedJson = createParser(expected.mappingsXContentType().xContent(), expected.mappings()); + XContentParser actualJson = createParser(actual.mappingsXContentType().xContent(), actual.mappings())) { + assertEquals(expectedJson.map(), actualJson.map()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + private void assertAliasesEqual(Set expected, Set actual) { + assertEquals(expected, actual); + + for (Alias expectedAlias : expected) { + for (Alias actualAlias : actual) { + if (expectedAlias.equals(actualAlias)) { + // As Alias#equals only looks at name, we check the equality of the other Alias parameters here. 
+ assertEquals(expectedAlias.filter(), actualAlias.filter()); + assertEquals(expectedAlias.indexRouting(), actualAlias.indexRouting()); + assertEquals(expectedAlias.searchRouting(), actualAlias.searchRouting()); + } + } + } + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> field.startsWith(MAPPINGS.getPreferredName()) + || field.startsWith(SETTINGS.getPreferredName()) + || field.startsWith(ALIASES.getPreferredName()); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java index 50224aa1b9ad7..e4fd0708b540c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/RandomCreateIndexGenerator.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/RandomCreateIndexGenerator.java new file mode 100644 index 0000000000000..179b7e728b620 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/RandomCreateIndexGenerator.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; + +import java.io.IOException; + +public class RandomCreateIndexGenerator { + + /** + * Returns a random {@link CreateIndexRequest}. + * + * Randomizes the index name, the aliases, mappings and settings associated with the + * index. When present, the mappings make no mention of types. + */ + public static CreateIndexRequest randomCreateIndexRequest() { + try { + // Create a random server request, and copy its contents into the HLRC request. + // Because client requests only accept typeless mappings, we must swap out the + // mapping definition for one that does not contain types. 
+ org.elasticsearch.action.admin.indices.create.CreateIndexRequest serverRequest = + org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest(); + return new CreateIndexRequest(serverRequest.index()) + .settings(serverRequest.settings()) + .aliases(serverRequest.aliases()) + .mapping(randomMapping()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates a random mapping, with no mention of types. + */ + public static XContentBuilder randomMapping() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + org.elasticsearch.index.RandomCreateIndexGenerator.randomMappingFields(builder, true); + builder.endObject(); + return builder; + } +} diff --git a/docs/java-rest/high-level/indices/create_index.asciidoc b/docs/java-rest/high-level/indices/create_index.asciidoc index 997b860b2786f..e6352d481ef86 100644 --- a/docs/java-rest/high-level/indices/create_index.asciidoc +++ b/docs/java-rest/high-level/indices/create_index.asciidoc @@ -55,13 +55,6 @@ include-tagged::{doc-tests-file}[{api}-mappings-xcontent] <1> Mapping source provided as an `XContentBuilder` object, the Elasticsearch built-in helpers to generate JSON content -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-mappings-shortcut] --------------------------------------------------- -<1> Mapping source provided as `Object` key-pairs, which gets converted to -JSON format - ==== Index aliases Aliases can be set at index creation time diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index ed2724b95bb47..61e8af47a43d5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -19,9 +19,11 @@ package org.elasticsearch.rest.action.admin.indices; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; @@ -37,6 +39,11 @@ import java.util.Map; public class RestCreateIndexAction extends BaseRestHandler { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestPutMappingAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in create " + + "index requests is deprecated. 
The parameter will be removed in the next major version."; + public RestCreateIndexAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(RestRequest.Method.PUT, "/{index}", this); @@ -51,6 +58,11 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final boolean includeTypeName = request.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY); + + if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { + deprecationLogger.deprecatedAndMaybeLog("create_index_with_types", TYPES_DEPRECATION_MESSAGE); + } + CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index")); if (request.hasContent()) { Map sourceAsMap = XContentHelper.convertToMap(request.content(), false, request.getXContentType()).v2(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 593cdc60e8a23..53e188e36c9a8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -134,7 +134,7 @@ private static PutMappingRequest createTestItem() throws IOException { String type = randomAlphaOfLength(5); request.type(type); - request.source(RandomCreateIndexGenerator.randomMapping()); + request.source(RandomCreateIndexGenerator.randomMapping(type)); return request; } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexActionTests.java new file mode 100644 index 0000000000000..1ec0a0f949965 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexActionTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; +import static org.mockito.Mockito.mock; + +public class RestCreateIndexActionTests extends RestActionTestCase { + private RestCreateIndexAction action; + + @Before + public void setupAction() { + action = new RestCreateIndexAction(Settings.EMPTY, controller()); + } + + public void testIncludeTypeName() throws IOException { + Map params = new HashMap<>(); + params.put(INCLUDE_TYPE_NAME_PARAMETER, randomFrom("true", "false")); + RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.PUT) + .withPath("/some_index") + .withParams(params) + .build(); + + action.prepareRequest(deprecatedRequest, mock(NodeClient.class)); + assertWarnings(RestCreateIndexAction.TYPES_DEPRECATION_MESSAGE); + + RestRequest validRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.PUT) + .withPath("/some_index") + .build(); + action.prepareRequest(validRequest, mock(NodeClient.class)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java index e4836150c6e86..345ef1f58bcac 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java @@ -40,8 +40,10 @@ public final class RandomCreateIndexGenerator { private RandomCreateIndexGenerator() {} /** - * Returns a random {@link CreateIndexRequest}. Randomizes the index name, the aliases, - * mappings and settings associated with the index. + * Returns a random {@link CreateIndexRequest}. + * + * Randomizes the index name, the aliases, mappings and settings associated with the + * index. If present, the mapping definition will be nested under a type name. */ public static CreateIndexRequest randomCreateIndexRequest() throws IOException { String index = randomAlphaOfLength(5); @@ -78,20 +80,6 @@ public static Settings randomIndexSettings() { return builder.build(); } - - /** - * Creates a random mapping, with no mention of types. - */ - public static XContentBuilder randomMapping() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); - builder.startObject(); - - randomMappingFields(builder, true); - - builder.endObject(); - return builder; - } - /** * Creates a random mapping, with the mapping definition nested * under the given type name. 
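For reference, the typeless flow that this patch moves the docs and tests to looks roughly like this end to end. This is a minimal sketch, not part of the patch: the cluster address and the index name `my-index` are assumptions, but the `CreateIndexRequest`, `mapping(XContentBuilder)` and `client.indices().create(...)` calls are exactly the ones exercised in the tests above.

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class TypelessCreateIndexExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // Typeless mapping: "properties" is the top-level key, with no
            // "_doc" wrapper, matching the new client-side request format.
            XContentBuilder mapping = XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("properties")
                        .startObject("message")
                            .field("type", "text")
                        .endObject()
                    .endObject()
                .endObject();
            CreateIndexRequest request = new CreateIndexRequest("my-index").mapping(mapping);
            CreateIndexResponse response = client.indices().create(request, RequestOptions.DEFAULT);
            System.out.println("acknowledged: " + response.isAcknowledged());
        }
    }
}
--------------------------------------------------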
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index 9507c5e12f8c2..136f51bed3c8c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -19,8 +19,8 @@ package org.elasticsearch.test; -import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; From 967de04257fb57459e21625de65e81d66705fc2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 24 Jan 2019 22:54:55 +0100 Subject: [PATCH 19/64] Uppercasing some docs section titles (#37781) Section titles are mostly uppercase; they should be lowercased only in the few cases where query DSL parameters or Java method names are used as the title. --- docs/community-clients/index.asciidoc | 2 +- .../bucket/significantterms-aggregation.asciidoc | 6 +++--- docs/reference/migration/migrate_7_0/mappings.asciidoc | 4 ++-- docs/reference/testing/testing-framework.asciidoc | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index 071eadb8bc53e..58a6e625aa8b3 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -131,7 +131,7 @@ The following project appears to be abandoned: Node.js client for the Elasticsearch REST API [[kotlin]] -== kotlin +== Kotlin * https://github.com/mbuhot/eskotlin[ES Kotlin]: Elasticsearch Query DSL for kotlin based on the {client}/java-api/current/index.html[official Elasticsearch Java client]. diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index 1c615e795c6a4..e29fbac0c5649 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -337,7 +337,7 @@ The JLH score can be used as a significance score by adding the parameter The scores are derived from the doc frequencies in _foreground_ and _background_ sets. The _absolute_ change in popularity (foregroundPercent - backgroundPercent) would favor common terms whereas the _relative_ change in popularity (foregroundPercent/ backgroundPercent) would favor rare terms. Rare vs common is essentially a precision vs recall balance and so the absolute and relative changes are multiplied to provide a sweet spot between precision and recall. -===== mutual information +===== Mutual information Mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1 can be used as significance score by adding the parameter [source,js] @@ -373,7 +373,7 @@ Chi square as described in "Information Retrieval", Manning et al., Chapter 13.5 Chi square behaves like mutual information and can be configured with the same parameters `include_negatives` and `background_is_superset`.
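As an aside on the heuristics this hunk retitles: selecting one is just a matter of naming it in the aggregation body. A hedged sketch via the low-level REST client, assuming the parameter names quoted above (`chi_square` with `include_negatives` and `background_is_superset`) and a made-up index and field:

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ChiSquareSignificanceExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request search = new Request("GET", "/reports/_search");
            // chi_square accepts the same two flags as mutual_information.
            search.setJsonEntity(
                "{"
                + "  \"aggs\": {"
                + "    \"significant_crime_types\": {"
                + "      \"significant_terms\": {"
                + "        \"field\": \"crime_type\","
                + "        \"chi_square\": {"
                + "          \"include_negatives\": true,"
                + "          \"background_is_superset\": true"
                + "        }"
                + "      }"
                + "    }"
                + "  }"
                + "}");
            Response response = client.performRequest(search);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------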
-===== google normalized distance +===== Google normalized distance Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007 (http://arxiv.org/pdf/cs/0412098v3.pdf) can be used as significance score by adding the parameter [source,js] @@ -412,7 +412,7 @@ It is hard to say which one of the different heuristics will be the best choice If none of the above measures suits your usecase than another option is to implement a custom significance measure: -===== scripted +===== Scripted Customized scores can be implemented via a script: [source,js] diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index e4d38d9a64374..653dd2fb4ca46 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -67,8 +67,8 @@ should also be changed in the template to explicitly define `tree` to one of `ge or `quadtree`. This will ensure compatibility with previously created indexes. [float] -==== deprecated `geo_shape` parameters +==== Deprecated `geo_shape` parameters The following type parameters are deprecated for the `geo_shape` field type: `tree`, `precision`, `tree_levels`, `distance_error_pct`, `points_only`, and `strategy`. They -will be removed in a future version. \ No newline at end of file +will be removed in a future version. diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index 321122d81f506..9603ac6c703b1 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -8,7 +8,7 @@ Testing is a crucial part of your application, and as information retrieval itse [[why-randomized-testing]] -=== why randomized testing? +=== Why randomized testing? The key concept of randomized testing is not to use the same input values for every testcase, but still be able to reproduce it in case of a failure. This allows to test with vastly different input variables in order to make sure, that your implementation is actually independent from your provided test data. @@ -48,7 +48,7 @@ We provide a few classes that you can inherit from in your own test classes whic [[unit-tests]] -=== unit tests +=== Unit tests If your test is a well isolated unit test which doesn't need a running Elasticsearch cluster, you can use the `ESTestCase`. If you are testing lucene features, use `ESTestCase` and if you are testing concrete token streams, use the `ESTokenStreamTestCase` class. Those specific classes execute additional checks which ensure that no resources leaks are happening, after the test has run. 
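The testing-framework page above stays abstract, so a hypothetical example may help. Any test extending `ESTestCase` gets seeded `random*` helpers: the input differs on every run, but a failure report carries the seed needed to replay the exact same values. The test class and the property it checks are made up for illustration.

["source","java"]
--------------------------------------------------
import org.elasticsearch.test.ESTestCase;

public class StringReverseTests extends ESTestCase {

    public void testReverseTwiceIsIdentity() {
        // Randomized but reproducible: on failure, ESTestCase prints the
        // seed that regenerates this exact input.
        String original = randomAlphaOfLength(randomIntBetween(0, 20));
        String reversed = new StringBuilder(original).reverse().toString();
        assertEquals(original, new StringBuilder(reversed).reverse().toString());
    }
}
--------------------------------------------------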
From d473bcda8d9fcb6330efe333f662f1c214ab631c Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 24 Jan 2019 14:08:58 -0800 Subject: [PATCH 20/64] Remove outdated callouts from the 'create index' HLRC docs --- docs/java-rest/high-level/indices/create_index.asciidoc | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/java-rest/high-level/indices/create_index.asciidoc b/docs/java-rest/high-level/indices/create_index.asciidoc index e6352d481ef86..004279ba2a892 100644 --- a/docs/java-rest/high-level/indices/create_index.asciidoc +++ b/docs/java-rest/high-level/indices/create_index.asciidoc @@ -84,14 +84,12 @@ The following arguments can optionally be provided: include-tagged::{doc-tests-file}[{api}-request-timeout] -------------------------------------------------- <1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` -<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-masterTimeout] -------------------------------------------------- <1> Timeout to connect to the master node as a `TimeValue` -<2> Timeout to connect to the master node as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- From 0f3c542850a477560d72935170808a6130cd3aca Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 24 Jan 2019 15:31:31 -0700 Subject: [PATCH 21/64] Deprecate xpack.watcher.history.cleaner_service.enabled (#37782) This deprecates the `xpack.watcher.history.cleaner_service.enabled` setting, since all newly created `.watch-history` indices in 7.0 will use ILM to manage their retention. In 8.0 the setting itself and cleanup actions will be removed. Resolves #32041 --- docs/reference/monitoring/exporters.asciidoc | 9 --------- docs/reference/settings/monitoring-settings.asciidoc | 5 ----- docs/reference/settings/notification-settings.asciidoc | 1 + .../org/elasticsearch/xpack/monitoring/Monitoring.java | 4 ++-- 4 files changed, 3 insertions(+), 16 deletions(-) diff --git a/docs/reference/monitoring/exporters.asciidoc b/docs/reference/monitoring/exporters.asciidoc index a1d4bc08ae73f..fee09015dbbd9 100644 --- a/docs/reference/monitoring/exporters.asciidoc +++ b/docs/reference/monitoring/exporters.asciidoc @@ -56,15 +56,6 @@ particularly the monitoring indices. To do so, you can take advantage of the //TO-DO: Add information about index lifecycle management https://github.com/elastic/x-pack-elasticsearch/issues/2814 -When using cluster alerts, {watcher} creates daily `.watcher_history*` indices. -These are not managed by {monitoring} and they are not curated automatically. It -is therefore critical that you curate these indices to avoid an undesirable and -unexpected increase in the number of shards and indices and eventually the -amount of disk usage. If you are using a `local` exporter, you can set the -`xpack.watcher.history.cleaner_service.enabled` setting to `true` and curate the -`.watcher_history*` indices by using the -<>. See <>. - There is also a disk watermark (known as the flood stage watermark), which protects clusters from running out of disk space. 
When this feature is triggered, it makes all indices (including monitoring indices) diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index 59a766d4dd0ca..c633088bc5ed4 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -105,11 +105,6 @@ being monitored, and it cannot be disabled. IMPORTANT: This setting currently only impacts `local`-type exporters. Indices created using the `http` exporter will not be deleted automatically. - -If both {monitoring} and {watcher} are enabled, you can use this setting to -affect the {watcher} cleaner service too. For more information, see the -`xpack.watcher.history.cleaner_service.enabled` setting in the -<>. -- `xpack.monitoring.exporters`:: diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 2f14dd276b849..c3f0ca3b8ce36 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -37,6 +37,7 @@ required. For more information, see `xpack.watcher.history.cleaner_service.enabled`:: added[6.3.0,Default changed to `true`.] +deprecated[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] + Set to `true` (default) to enable the cleaner service. If this setting is `true`, the `xpack.monitoring.enabled` setting must also be set to `true` with diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index 2913a0ec9c0df..b054eca1cc319 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -77,9 +77,9 @@ public class Monitoring extends Plugin implements ActionPlugin { /** * The ability to automatically cleanup ".watcher_history*" indices while also cleaning up Monitoring indices. */ + @Deprecated public static final Setting CLEAN_WATCHER_HISTORY = boolSetting("xpack.watcher.history.cleaner_service.enabled", - true, - Setting.Property.Dynamic, Setting.Property.NodeScope); + true, Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Deprecated); protected final Settings settings; private final boolean enabled; From 4313c2d4a77818094b66791f6afa69238ae9f5f8 Mon Sep 17 00:00:00 2001 From: Peter Dyson Date: Fri, 25 Jan 2019 08:39:58 +1000 Subject: [PATCH 22/64] [DOCS] More info on disabling swap (#37248) * [DOCS] More info on disabling swap, inform that no Elasticsearch restart is required. --- docs/reference/setup/sysconfig/swap.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc index c936cdf964370..d41fe8abf8d20 100644 --- a/docs/reference/setup/sysconfig/swap.asciidoc +++ b/docs/reference/setup/sysconfig/swap.asciidoc @@ -28,6 +28,8 @@ On Linux systems, you can disable swap temporarily by running: sudo swapoff -a -------------- +This doesn't require a restart of Elasticsearch. + To disable it permanently, you will need to edit the `/etc/fstab` file and comment out any lines that contain the word `swap`. 
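Patch 21 above also illustrates the general recipe for retiring a setting: keep it functional, but add `Setting.Property.Deprecated` so that any use of the key logs a deprecation warning while behaviour stays unchanged. A sketch of the same pattern, with a hypothetical plugin setting key:

["source","java"]
--------------------------------------------------
import org.elasticsearch.common.settings.Setting;

public final class MyPluginSettings {

    // Hypothetical setting kept for backwards compatibility. The extra
    // Property.Deprecated flag makes every use of the key emit a
    // deprecation warning; the setting otherwise works as before.
    public static final Setting<Boolean> LEGACY_CLEANUP_ENABLED =
        Setting.boolSetting("my_plugin.history.cleaner_service.enabled", true,
            Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Deprecated);

    private MyPluginSettings() {}
}
--------------------------------------------------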
From 9d87ca567a9adf718ed3583d7423e42d42189a88 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 24 Jan 2019 23:40:12 +0100 Subject: [PATCH 23/64] 300_sequence_numbers should not rely on 7.0 total hits structure --- .../rest-api-spec/test/search/300_sequence_numbers.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml index 716740653111a..61bbfdcc267ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml @@ -33,7 +33,6 @@ setup: foo: bar seq_no_primary_term: true - - match: {hits.total.value: 1} - match: {hits.hits.0._seq_no: 1} - gte: {hits.hits.0._primary_term: 1} @@ -48,7 +47,6 @@ setup: foo: bar seq_no_primary_term: true - - match: {hits.total.value: 1} - match: {hits.hits.0._seq_no: 1} - gte: {hits.hits.0._primary_term: 1} From a30ce6a00a9aa4c82320c9d8f4427652ce9078fd Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Thu, 24 Jan 2019 19:18:48 -0500 Subject: [PATCH 24/64] Rename feature, feature_vector and feature_query (#37794) Renaming as follows: feature -> rank_feature feature_vector -> rank_features feature query -> rank_feature query Renaming is done to distinguish from other vector types. Closes #36723 --- docs/reference/mapping/types.asciidoc | 8 +-- docs/reference/mapping/types/feature.asciidoc | 57 ------------------- .../mapping/types/rank-feature.asciidoc | 57 +++++++++++++++++++ ...vector.asciidoc => rank-features.asciidoc} | 26 ++++----- ...y.asciidoc => rank-feature-query.asciidoc} | 40 ++++++------- .../query-dsl/script-score-query.asciidoc | 2 +- .../query-dsl/special-queries.asciidoc | 4 +- .../index/mapper/MapperExtrasPlugin.java | 11 ++-- ...apper.java => RankFeatureFieldMapper.java} | 50 ++++++++-------- ...r.java => RankFeatureMetaFieldMapper.java} | 28 ++++----- ...pper.java => RankFeaturesFieldMapper.java} | 54 +++++++++--------- ...lder.java => RankFeatureQueryBuilder.java} | 40 ++++++------- ....java => RankFeatureFieldMapperTests.java} | 14 ++--- ...ts.java => RankFeatureFieldTypeTests.java} | 8 +-- ...a => RankFeatureMetaFieldMapperTests.java} | 6 +- ...ava => RankFeatureMetaFieldTypeTests.java} | 4 +- ...java => RankFeaturesFieldMapperTests.java} | 18 +++--- ...s.java => RankFeaturesFieldTypeTests.java} | 4 +- ...java => RankFeatureQueryBuilderTests.java} | 25 ++++---- .../{feature => rank_feature}/10_basic.yml | 18 +++--- .../10_basic.yml | 10 ++-- 21 files changed, 243 insertions(+), 241 deletions(-) delete mode 100644 docs/reference/mapping/types/feature.asciidoc create mode 100644 docs/reference/mapping/types/rank-feature.asciidoc rename docs/reference/mapping/types/{feature-vector.asciidoc => rank-features.asciidoc} (50%) rename docs/reference/query-dsl/{feature-query.asciidoc => rank-feature-query.asciidoc} (83%) rename modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/{FeatureFieldMapper.java => RankFeatureFieldMapper.java} (80%) rename modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/{FeatureMetaFieldMapper.java => RankFeatureMetaFieldMapper.java} (81%) rename modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/{FeatureVectorFieldMapper.java => RankFeaturesFieldMapper.java} (70%) rename modules/mapper-extras/src/main/java/org/elasticsearch/index/query/{FeatureQueryBuilder.java =>
RankFeatureQueryBuilder.java} (89%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/{FeatureFieldMapperTests.java => RankFeatureFieldMapperTests.java} (93%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/{FeatureFieldTypeTests.java => RankFeatureFieldTypeTests.java} (79%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/{FeatureMetaFieldMapperTests.java => RankFeatureMetaFieldMapperTests.java} (90%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/{FeatureVectorFieldTypeTests.java => RankFeatureMetaFieldTypeTests.java} (86%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/{FeatureVectorFieldMapperTests.java => RankFeaturesFieldMapperTests.java} (87%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/{FeatureMetaFieldTypeTests.java => RankFeaturesFieldTypeTests.java} (87%) rename modules/mapper-extras/src/test/java/org/elasticsearch/index/query/{FeatureQueryBuilderTests.java => RankFeatureQueryBuilderTests.java} (82%) rename modules/mapper-extras/src/test/resources/rest-api-spec/test/{feature => rank_feature}/10_basic.yml (89%) rename modules/mapper-extras/src/test/resources/rest-api-spec/test/{feature_vector => rank_features}/10_basic.yml (89%) diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 5daa01ea7de1a..d8fa113ec2354 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -43,9 +43,9 @@ string:: <> and <> <>:: Defines an alias to an existing field. -<>:: Record numeric features to boost hits at query time. +<>:: Record numeric feature to boost hits at query time. -<>:: Record numeric feature vectors to boost hits at query time. +<>:: Record numeric features to boost hits at query time. <>:: Record dense vectors of float values. @@ -100,9 +100,9 @@ include::types/percolator.asciidoc[] include::types/parent-join.asciidoc[] -include::types/feature.asciidoc[] +include::types/rank-feature.asciidoc[] -include::types/feature-vector.asciidoc[] +include::types/rank-features.asciidoc[] include::types/dense-vector.asciidoc[] diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc deleted file mode 100644 index 7fe8ff6f935af..0000000000000 --- a/docs/reference/mapping/types/feature.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -[[feature]] -=== Feature datatype - -A `feature` field can index numbers so that they can later be used to boost -documents in queries with a <> query. - -[source,js] --------------------------------------------------- -PUT my_index -{ - "mappings": { - "properties": { - "pagerank": { - "type": "feature" <1> - }, - "url_length": { - "type": "feature", - "positive_score_impact": false <2> - } - } - } -} - -PUT my_index/_doc/1 -{ - "pagerank": 8, - "url_length": 22 -} - -GET my_index/_search -{ - "query": { - "feature": { - "field": "pagerank" - } - } -} --------------------------------------------------- -// CONSOLE -<1> Feature fields must use the `feature` field type -<2> Features that correlate negatively with the score need to declare it - -NOTE: `feature` fields only support single-valued fields and strictly positive -values. Multi-valued fields and negative values will be rejected. - -NOTE: `feature` fields do not support querying, sorting or aggregating. They may -only be used within <> queries. 
- -NOTE: `feature` fields only preserve 9 significant bits for the precision, which -translates to a relative error of about 0.4%. - -Features that correlate negatively with the score should set -`positive_score_impact` to `false` (defaults to `true`). This will be used by -the <> query to modify the scoring formula -in such a way that the score decreases with the value of the feature instead of -increasing. For instance in web search, the url length is a commonly used -feature which correlates negatively with scores. diff --git a/docs/reference/mapping/types/rank-feature.asciidoc b/docs/reference/mapping/types/rank-feature.asciidoc new file mode 100644 index 0000000000000..780a68216f49e --- /dev/null +++ b/docs/reference/mapping/types/rank-feature.asciidoc @@ -0,0 +1,57 @@ +[[rank-feature]] +=== Rank feature datatype + +A `rank_feature` field can index numbers so that they can later be used to boost +documents in queries with a <> query. + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "properties": { + "pagerank": { + "type": "rank_feature" <1> + }, + "url_length": { + "type": "rank_feature", + "positive_score_impact": false <2> + } + } + } +} + +PUT my_index/_doc/1 +{ + "pagerank": 8, + "url_length": 22 +} + +GET my_index/_search +{ + "query": { + "rank_feature": { + "field": "pagerank" + } + } +} +-------------------------------------------------- +// CONSOLE +<1> Rank feature fields must use the `rank_feature` field type +<2> Rank features that correlate negatively with the score need to declare it + +NOTE: `rank_feature` fields only support single-valued fields and strictly positive +values. Multi-valued fields and negative values will be rejected. + +NOTE: `rank_feature` fields do not support querying, sorting or aggregating. They may +only be used within <> queries. + +NOTE: `rank_feature` fields only preserve 9 significant bits for the precision, which +translates to a relative error of about 0.4%. + +Rank features that correlate negatively with the score should set +`positive_score_impact` to `false` (defaults to `true`). This will be used by +the <> query to modify the scoring formula +in such a way that the score decreases with the value of the feature instead of +increasing. For instance in web search, the url length is a commonly used +feature which correlates negatively with scores. diff --git a/docs/reference/mapping/types/feature-vector.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc similarity index 50% rename from docs/reference/mapping/types/feature-vector.asciidoc rename to docs/reference/mapping/types/rank-features.asciidoc index b4701fc9ab7dd..9bc960b7f8351 100644 --- a/docs/reference/mapping/types/feature-vector.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -1,11 +1,11 @@ -[[feature-vector]] -=== Feature vector datatype +[[rank-features]] +=== Rank features datatype -A `feature_vector` field can index numeric feature vectors, so that they can +A `rank_features` field can index numeric feature vectors, so that they can later be used to boost documents in queries with a -<> query. +<> query. -It is analogous to the <> datatype but is better suited +It is analogous to the <> datatype but is better suited when the list of features is sparse so that it wouldn't be reasonable to add one field to the mappings for each of them. 
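The datatype pages above use console snippets; the same calls work from Java. A sketch with the low-level REST client, mirroring the `rank_feature` mapping and query from the new page; the cluster address and the `refresh` parameter are assumptions for the example:

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RankFeatureExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Mapping as in the rank-feature docs page above.
            Request create = new Request("PUT", "/my_index");
            create.setJsonEntity(
                "{\"mappings\": {\"properties\": {"
                + "\"pagerank\": {\"type\": \"rank_feature\"},"
                + "\"url_length\": {\"type\": \"rank_feature\", \"positive_score_impact\": false}}}}");
            client.performRequest(create);

            // Index one document and make it searchable immediately.
            Request index = new Request("PUT", "/my_index/_doc/1");
            index.addParameter("refresh", "true");
            index.setJsonEntity("{\"pagerank\": 8, \"url_length\": 22}");
            client.performRequest(index);

            // Boost by the pagerank feature, as in the docs example.
            Request search = new Request("GET", "/my_index/_search");
            search.setJsonEntity("{\"query\": {\"rank_feature\": {\"field\": \"pagerank\"}}}");
            Response response = client.performRequest(search);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------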
@@ -16,7 +16,7 @@ PUT my_index "mappings": { "properties": { "topics": { - "type": "feature_vector" <1> + "type": "rank_features" <1> } } } @@ -41,22 +41,22 @@ PUT my_index/_doc/2 GET my_index/_search { "query": { - "feature": { + "rank_feature": { "field": "topics.politics" } } } -------------------------------------------------- // CONSOLE -<1> Feature vector fields must use the `feature_vector` field type -<2> Feature vector fields must be a hash with string keys and strictly positive numeric values +<1> Rank features fields must use the `rank_features` field type +<2> Rank features fields must be a hash with string keys and strictly positive numeric values -NOTE: `feature_vector` fields only support single-valued features and strictly +NOTE: `rank_features` fields only support single-valued features and strictly positive values. Multi-valued fields and zero or negative values will be rejected. -NOTE: `feature_vector` fields do not support sorting or aggregating and may -only be queried using <> queries. +NOTE: `rank_features` fields do not support sorting or aggregating and may +only be queried using <> queries. -NOTE: `feature_vector` fields only preserve 9 significant bits for the +NOTE: `rank_features` fields only preserve 9 significant bits for the precision, which translates to a relative error of about 0.4%. diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/rank-feature-query.asciidoc similarity index 83% rename from docs/reference/query-dsl/feature-query.asciidoc rename to docs/reference/query-dsl/rank-feature-query.asciidoc index 353c135bf8efd..277d45f257d02 100644 --- a/docs/reference/query-dsl/feature-query.asciidoc +++ b/docs/reference/query-dsl/rank-feature-query.asciidoc @@ -1,8 +1,8 @@ -[[query-dsl-feature-query]] -=== Feature Query +[[query-dsl-rank-feature-query]] +=== Rank Feature Query -The `feature` query is a specialized query that only works on -<> fields and <> fields. +The `rank_feature` query is a specialized query that only works on +<> fields and <> fields. Its goal is to boost the score of documents based on the values of numeric features. It is typically put in a `should` clause of a <> query so that its score is added to the score @@ -32,14 +32,14 @@ PUT test "mappings": { "properties": { "pagerank": { - "type": "feature" + "type": "rank_feature" }, "url_length": { - "type": "feature", + "type": "rank_feature", "positive_score_impact": false }, "topics": { - "type": "feature_vector" + "type": "rank_features" } } } @@ -97,18 +97,18 @@ GET test/_search ], "should": [ { - "feature": { + "rank_feature": { "field": "pagerank" } }, { - "feature": { + "rank_feature": { "field": "url_length", "boost": 0.1 } }, { - "feature": { + "rank_feature": { "field": "topics.sports", "boost": 0.4 } @@ -123,8 +123,8 @@ GET test/_search [float] === Supported functions -The `feature` query supports 3 functions in order to boost scores using the -values of features. If you do not know where to start, we recommend that you +The `rank_feature` query supports 3 functions in order to boost scores using the +values of rank features. If you do not know where to start, we recommend that you start with the `saturation` function, which is the default when no function is provided. @@ -132,11 +132,11 @@ provided. 
==== Saturation This function gives a score that is equal to `S / (S + pivot)` where `S` is the -value of the feature and `pivot` is a configurable pivot value so that the +value of the rank feature and `pivot` is a configurable pivot value so that the result will be less than +0.5+ if `S` is less than pivot and greater than +0.5+ otherwise. Scores are always is +(0, 1)+. -If the feature has a negative score impact then the function will be computed as +If the rank feature has a negative score impact then the function will be computed as `pivot / (S + pivot)`, which decreases when `S` increases. [source,js] @@ -144,7 +144,7 @@ If the feature has a negative score impact then the function will be computed as GET test/_search { "query": { - "feature": { + "rank_feature": { "field": "pagerank", "saturation": { "pivot": 8 @@ -166,7 +166,7 @@ train a good pivot value. GET test/_search { "query": { - "feature": { + "rank_feature": { "field": "pagerank", "saturation": {} } @@ -180,17 +180,17 @@ GET test/_search ==== Logarithm This function gives a score that is equal to `log(scaling_factor + S)` where -`S` is the value of the feature and `scaling_factor` is a configurable scaling +`S` is the value of the rank feature and `scaling_factor` is a configurable scaling factor. Scores are unbounded. -This function only supports features that have a positive score impact. +This function only supports rank features that have a positive score impact. [source,js] -------------------------------------------------- GET test/_search { "query": { - "feature": { + "rank_feature": { "field": "pagerank", "log": { "scaling_factor": 4 @@ -219,7 +219,7 @@ that you stick to the `saturation` function instead. GET test/_search { "query": { - "feature": { + "rank_feature": { "field": "pagerank", "sigmoid": { "pivot": 7, diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index e77791270318f..cdcfd0f0a5032 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -211,7 +211,7 @@ There are faster alternative query types that can efficiently skip non-competitive hits: * If you want to boost documents on some static fields, use - <>. + <>. ==== Transition from Function Score Query diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index 7d36d01c8d6f5..04ab2d53f6d35 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -23,7 +23,7 @@ A query that allows to modify the score of a sub-query with a script. This query finds queries that are stored as documents that match with the specified document. -<>:: +<>:: A query that computes scores based on the values of numeric features and is able to efficiently skip non-competitive hits. 
@@ -40,6 +40,6 @@ include::script-score-query.asciidoc[] include::percolate-query.asciidoc[] -include::feature-query.asciidoc[] +include::rank-feature-query.asciidoc[] include::wrapper-query.asciidoc[] diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index a875ebbb079ef..cbafd0fd1efff 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser; -import org.elasticsearch.index.query.FeatureQueryBuilder; +import org.elasticsearch.index.query.RankFeatureQueryBuilder; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -37,8 +37,8 @@ public Map getMappers() { Map mappers = new LinkedHashMap<>(); mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); - mappers.put(FeatureFieldMapper.CONTENT_TYPE, new FeatureFieldMapper.TypeParser()); - mappers.put(FeatureVectorFieldMapper.CONTENT_TYPE, new FeatureVectorFieldMapper.TypeParser()); + mappers.put(RankFeatureFieldMapper.CONTENT_TYPE, new RankFeatureFieldMapper.TypeParser()); + mappers.put(RankFeaturesFieldMapper.CONTENT_TYPE, new RankFeaturesFieldMapper.TypeParser()); mappers.put(DenseVectorFieldMapper.CONTENT_TYPE, new DenseVectorFieldMapper.TypeParser()); mappers.put(SparseVectorFieldMapper.CONTENT_TYPE, new SparseVectorFieldMapper.TypeParser()); return Collections.unmodifiableMap(mappers); @@ -46,13 +46,14 @@ public Map getMappers() { @Override public Map getMetadataMappers() { - return Collections.singletonMap(FeatureMetaFieldMapper.CONTENT_TYPE, new FeatureMetaFieldMapper.TypeParser()); + return Collections.singletonMap(RankFeatureMetaFieldMapper.CONTENT_TYPE, new RankFeatureMetaFieldMapper.TypeParser()); } @Override public List> getQueries() { return Collections.singletonList( - new QuerySpec<>(FeatureQueryBuilder.NAME, FeatureQueryBuilder::new, p -> FeatureQueryBuilder.PARSER.parse(p, null))); + new QuerySpec<>(RankFeatureQueryBuilder.NAME, RankFeatureQueryBuilder::new, + p -> RankFeatureQueryBuilder.PARSER.parse(p, null))); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java similarity index 80% rename from modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java index 25747807b6a35..4cdc9463cdca3 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureFieldMapper.java @@ -42,12 +42,12 @@ /** * A {@link FieldMapper} that exposes Lucene's {@link FeatureField}. 
*/ -public class FeatureFieldMapper extends FieldMapper { +public class RankFeatureFieldMapper extends FieldMapper { - public static final String CONTENT_TYPE = "feature"; + public static final String CONTENT_TYPE = "rank_feature"; public static class Defaults { - public static final MappedFieldType FIELD_TYPE = new FeatureFieldType(); + public static final MappedFieldType FIELD_TYPE = new RankFeatureFieldType(); static { FIELD_TYPE.setTokenized(false); @@ -58,7 +58,7 @@ public static class Defaults { } } - public static class Builder extends FieldMapper.Builder { + public static class Builder extends FieldMapper.Builder { public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); @@ -66,8 +66,8 @@ public Builder(String name) { } @Override - public FeatureFieldType fieldType() { - return (FeatureFieldType) super.fieldType(); + public RankFeatureFieldType fieldType() { + return (RankFeatureFieldType) super.fieldType(); } public Builder positiveScoreImpact(boolean v) { @@ -76,9 +76,9 @@ public Builder positiveScoreImpact(boolean v) { } @Override - public FeatureFieldMapper build(BuilderContext context) { + public RankFeatureFieldMapper build(BuilderContext context) { setupFieldType(context); - return new FeatureFieldMapper( + return new RankFeatureFieldMapper( name, fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } @@ -87,7 +87,7 @@ public FeatureFieldMapper build(BuilderContext context) { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - FeatureFieldMapper.Builder builder = new FeatureFieldMapper.Builder(name); + RankFeatureFieldMapper.Builder builder = new RankFeatureFieldMapper.Builder(name); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); String propName = entry.getKey(); @@ -101,22 +101,22 @@ public Mapper.Builder parse(String name, Map node, ParserCo } } - public static final class FeatureFieldType extends MappedFieldType { + public static final class RankFeatureFieldType extends MappedFieldType { private boolean positiveScoreImpact = true; - public FeatureFieldType() { + public RankFeatureFieldType() { setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); } - protected FeatureFieldType(FeatureFieldType ref) { + protected RankFeatureFieldType(RankFeatureFieldType ref) { super(ref); this.positiveScoreImpact = ref.positiveScoreImpact; } - public FeatureFieldType clone() { - return new FeatureFieldType(this); + public RankFeatureFieldType clone() { + return new RankFeatureFieldType(this); } @Override @@ -124,7 +124,7 @@ public boolean equals(Object o) { if (super.equals(o) == false) { return false; } - FeatureFieldType other = (FeatureFieldType) o; + RankFeatureFieldType other = (RankFeatureFieldType) o; return Objects.equals(positiveScoreImpact, other.positiveScoreImpact); } @@ -138,7 +138,7 @@ public int hashCode() { @Override public void checkCompatibility(MappedFieldType other, List conflicts) { super.checkCompatibility(other, conflicts); - if (positiveScoreImpact != ((FeatureFieldType) other).positiveScoreImpact()) { + if (positiveScoreImpact != ((RankFeatureFieldType) other).positiveScoreImpact()) { conflicts.add("mapper [" + name() + "] has different [positive_score_impact] values"); } } @@ -164,29 +164,29 @@ public Query existsQuery(QueryShardContext context) { @Override 
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - throw new UnsupportedOperationException("[feature] fields do not support sorting, scripting or aggregating"); + throw new UnsupportedOperationException("[rank_feature] fields do not support sorting, scripting or aggregating"); } @Override public Query termQuery(Object value, QueryShardContext context) { - throw new UnsupportedOperationException("Queries on [feature] fields are not supported"); + throw new UnsupportedOperationException("Queries on [rank_feature] fields are not supported"); } } - private FeatureFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + private RankFeatureFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; } @Override - protected FeatureFieldMapper clone() { - return (FeatureFieldMapper) super.clone(); + protected RankFeatureFieldMapper clone() { + return (RankFeatureFieldMapper) super.clone(); } @Override - public FeatureFieldType fieldType() { - return (FeatureFieldType) super.fieldType(); + public RankFeatureFieldType fieldType() { + return (RankFeatureFieldType) super.fieldType(); } @Override @@ -207,8 +207,8 @@ protected void parseCreateField(ParseContext context, List field } if (context.doc().getByKey(name()) != null) { - throw new IllegalArgumentException("[feature] fields do not support indexing multiple values for the same field [" + name() + - "] in the same document"); + throw new IllegalArgumentException("[rank_feature] fields do not support indexing multiple values for the same field [" + + name() + "] in the same document"); } if (fieldType().positiveScoreImpact() == false) { diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapper.java similarity index 81% rename from modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapper.java index 2102a029a6ad6..a5cf4eeac692a 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapper.java @@ -33,18 +33,18 @@ import java.util.Map; /** - * This meta field only exists because feature fields index everything into a + * This meta field only exists because rank feature fields index everything into a * common _feature field and Elasticsearch has a custom codec that complains * when fields exist in the index and not in mappings. 
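 * For illustration, indexing a value such as {"pagerank": 10} into a mapped
 * [rank_feature] field becomes, roughly, a Lucene
 *   new FeatureField("_feature", "pagerank", 10f)
 * so every mapped rank feature contributes terms to the shared "_feature"
 * field, which is why that field needs a mapping entry of its own.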
*/ -public class FeatureMetaFieldMapper extends MetadataFieldMapper { +public class RankFeatureMetaFieldMapper extends MetadataFieldMapper { public static final String NAME = "_feature"; public static final String CONTENT_TYPE = "_feature"; public static class Defaults { - public static final MappedFieldType FIELD_TYPE = new FeatureMetaFieldType(); + public static final MappedFieldType FIELD_TYPE = new RankFeatureMetaFieldType(); static { FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS); @@ -58,16 +58,16 @@ public static class Defaults { } } - public static class Builder extends MetadataFieldMapper.Builder { + public static class Builder extends MetadataFieldMapper.Builder { public Builder(MappedFieldType existing) { super(NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } @Override - public FeatureMetaFieldMapper build(BuilderContext context) { + public RankFeatureMetaFieldMapper build(BuilderContext context) { setupFieldType(context); - return new FeatureMetaFieldMapper(fieldType, context.indexSettings()); + return new RankFeatureMetaFieldMapper(fieldType, context.indexSettings()); } } @@ -82,7 +82,7 @@ public MetadataFieldMapper.Builder parse(String name, public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); if (fieldType != null) { - return new FeatureMetaFieldMapper(indexSettings, fieldType); + return new RankFeatureMetaFieldMapper(indexSettings, fieldType); } else { return parse(NAME, Collections.emptyMap(), context) .build(new BuilderContext(indexSettings, new ContentPath(1))); @@ -90,18 +90,18 @@ public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext c } } - public static final class FeatureMetaFieldType extends MappedFieldType { + public static final class RankFeatureMetaFieldType extends MappedFieldType { - public FeatureMetaFieldType() { + public RankFeatureMetaFieldType() { } - protected FeatureMetaFieldType(FeatureMetaFieldType ref) { + protected RankFeatureMetaFieldType(RankFeatureMetaFieldType ref) { super(ref); } @Override - public FeatureMetaFieldType clone() { - return new FeatureMetaFieldType(this); + public RankFeatureMetaFieldType clone() { + return new RankFeatureMetaFieldType(this); } @Override @@ -120,11 +120,11 @@ public Query termQuery(Object value, QueryShardContext context) { } } - private FeatureMetaFieldMapper(Settings indexSettings, MappedFieldType existing) { + private RankFeatureMetaFieldMapper(Settings indexSettings, MappedFieldType existing) { this(existing.clone(), indexSettings); } - private FeatureMetaFieldMapper(MappedFieldType fieldType, Settings indexSettings) { + private RankFeatureMetaFieldMapper(MappedFieldType fieldType, Settings indexSettings) { super(NAME, fieldType, Defaults.FIELD_TYPE, indexSettings); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java similarity index 70% rename from modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java index b6b962a200a23..80b6fe4bc3442 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java +++ 
b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapper.java @@ -37,12 +37,12 @@ * A {@link FieldMapper} that exposes Lucene's {@link FeatureField} as a sparse * vector of features. */ -public class FeatureVectorFieldMapper extends FieldMapper { +public class RankFeaturesFieldMapper extends FieldMapper { - public static final String CONTENT_TYPE = "feature_vector"; + public static final String CONTENT_TYPE = "rank_features"; public static class Defaults { - public static final MappedFieldType FIELD_TYPE = new FeatureVectorFieldType(); + public static final MappedFieldType FIELD_TYPE = new RankFeaturesFieldType(); static { FIELD_TYPE.setTokenized(false); @@ -53,7 +53,7 @@ public static class Defaults { } } - public static class Builder extends FieldMapper.Builder { + public static class Builder extends FieldMapper.Builder { public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); @@ -61,14 +61,14 @@ public Builder(String name) { } @Override - public FeatureVectorFieldType fieldType() { - return (FeatureVectorFieldType) super.fieldType(); + public RankFeaturesFieldType fieldType() { + return (RankFeaturesFieldType) super.fieldType(); } @Override - public FeatureVectorFieldMapper build(BuilderContext context) { + public RankFeaturesFieldMapper build(BuilderContext context) { setupFieldType(context); - return new FeatureVectorFieldMapper( + return new RankFeaturesFieldMapper( name, fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } @@ -77,24 +77,24 @@ public FeatureVectorFieldMapper build(BuilderContext context) { public static class TypeParser implements Mapper.TypeParser { @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - FeatureVectorFieldMapper.Builder builder = new FeatureVectorFieldMapper.Builder(name); + RankFeaturesFieldMapper.Builder builder = new RankFeaturesFieldMapper.Builder(name); return builder; } } - public static final class FeatureVectorFieldType extends MappedFieldType { + public static final class RankFeaturesFieldType extends MappedFieldType { - public FeatureVectorFieldType() { + public RankFeaturesFieldType() { setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); } - protected FeatureVectorFieldType(FeatureVectorFieldType ref) { + protected RankFeaturesFieldType(RankFeaturesFieldType ref) { super(ref); } - public FeatureVectorFieldType clone() { - return new FeatureVectorFieldType(this); + public RankFeaturesFieldType clone() { + return new RankFeaturesFieldType(this); } @Override @@ -104,44 +104,44 @@ public String typeName() { @Override public Query existsQuery(QueryShardContext context) { - throw new UnsupportedOperationException("[feature_vector] fields do not support [exists] queries"); + throw new UnsupportedOperationException("[rank_features] fields do not support [exists] queries"); } @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - throw new UnsupportedOperationException("[feature_vector] fields do not support sorting, scripting or aggregating"); + throw new UnsupportedOperationException("[rank_features] fields do not support sorting, scripting or aggregating"); } @Override public Query termQuery(Object value, QueryShardContext context) { - throw new UnsupportedOperationException("Queries on [feature_vector] fields are not supported"); + throw new UnsupportedOperationException("Queries on 
[rank_features] fields are not supported"); } } - private FeatureVectorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + private RankFeaturesFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; } @Override - protected FeatureVectorFieldMapper clone() { - return (FeatureVectorFieldMapper) super.clone(); + protected RankFeaturesFieldMapper clone() { + return (RankFeaturesFieldMapper) super.clone(); } @Override - public FeatureVectorFieldType fieldType() { - return (FeatureVectorFieldType) super.fieldType(); + public RankFeaturesFieldType fieldType() { + return (RankFeaturesFieldType) super.fieldType(); } @Override public void parse(ParseContext context) throws IOException { if (context.externalValueSet()) { - throw new IllegalArgumentException("[feature_vector] fields can't be used in multi-fields"); + throw new IllegalArgumentException("[rank_features] fields can't be used in multi-fields"); } if (context.parser().currentToken() != Token.START_OBJECT) { - throw new IllegalArgumentException("[feature_vector] fields must be json objects, expected a START_OBJECT but got: " + + throw new IllegalArgumentException("[rank_features] fields must be json objects, expected a START_OBJECT but got: " + context.parser().currentToken()); } @@ -155,12 +155,12 @@ public void parse(ParseContext context) throws IOException { final String key = name() + "." + feature; float value = context.parser().floatValue(true); if (context.doc().getByKey(key) != null) { - throw new IllegalArgumentException("[feature_vector] fields do not support indexing multiple values for the same " + - "feature [" + key + "] in the same document"); + throw new IllegalArgumentException("[rank_features] fields do not support indexing multiple values for the same " + + "rank feature [" + key + "] in the same document"); } context.doc().addWithKey(key, new FeatureField(name(), feature, value)); } else { - throw new IllegalArgumentException("[feature_vector] fields take hashes that map a feature to a strictly positive " + + throw new IllegalArgumentException("[rank_features] fields take hashes that map a feature to a strictly positive " + "float, but got unexpected token " + token); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/RankFeatureQueryBuilder.java similarity index 89% rename from modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java rename to modules/mapper-extras/src/main/java/org/elasticsearch/index/query/RankFeatureQueryBuilder.java index 3b7fb97eab1de..259c69cff7fac 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/RankFeatureQueryBuilder.java @@ -27,9 +27,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.mapper.FeatureFieldMapper.FeatureFieldType; -import org.elasticsearch.index.mapper.FeatureMetaFieldMapper; -import 
org.elasticsearch.index.mapper.FeatureVectorFieldMapper.FeatureVectorFieldType; +import org.elasticsearch.index.mapper.RankFeatureFieldMapper.RankFeatureFieldType; +import org.elasticsearch.index.mapper.RankFeatureMetaFieldMapper; +import org.elasticsearch.index.mapper.RankFeaturesFieldMapper.RankFeaturesFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import java.io.IOException; @@ -37,12 +37,12 @@ import java.util.Objects; /** - * Query to run on a [feature] field. + * Query to run on a [rank_feature] field. */ -public final class FeatureQueryBuilder extends AbstractQueryBuilder { +public final class RankFeatureQueryBuilder extends AbstractQueryBuilder { /** - * Scoring function for a [feature] field. + * Scoring function for a [rank_feature] field. */ public abstract static class ScoreFunction { @@ -260,23 +260,23 @@ private static ScoreFunction readScoreFunction(StreamInput in) throws IOExceptio } } - public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "feature", args -> { final String field = (String) args[0]; final float boost = args[1] == null ? DEFAULT_BOOST : (Float) args[1]; final String queryName = (String) args[2]; long numNonNulls = Arrays.stream(args, 3, args.length).filter(Objects::nonNull).count(); - final FeatureQueryBuilder query; + final RankFeatureQueryBuilder query; if (numNonNulls > 1) { throw new IllegalArgumentException("Can only specify one of [log], [saturation] and [sigmoid]"); } else if (numNonNulls == 0) { - query = new FeatureQueryBuilder(field, new ScoreFunction.Saturation()); + query = new RankFeatureQueryBuilder(field, new ScoreFunction.Saturation()); } else { ScoreFunction scoreFunction = (ScoreFunction) Arrays.stream(args, 3, args.length) .filter(Objects::nonNull) .findAny() .get(); - query = new FeatureQueryBuilder(field, scoreFunction); + query = new RankFeatureQueryBuilder(field, scoreFunction); } query.boost(boost); query.queryName(queryName); @@ -294,17 +294,17 @@ private static ScoreFunction readScoreFunction(StreamInput in) throws IOExceptio ScoreFunction.Sigmoid.PARSER, new ParseField("sigmoid")); } - public static final String NAME = "feature"; + public static final String NAME = "rank_feature"; private final String field; private final ScoreFunction scoreFunction; - public FeatureQueryBuilder(String field, ScoreFunction scoreFunction) { + public RankFeatureQueryBuilder(String field, ScoreFunction scoreFunction) { this.field = Objects.requireNonNull(field); this.scoreFunction = Objects.requireNonNull(scoreFunction); } - public FeatureQueryBuilder(StreamInput in) throws IOException { + public RankFeatureQueryBuilder(StreamInput in) throws IOException { super(in); this.field = in.readString(); this.scoreFunction = readScoreFunction(in); @@ -334,27 +334,27 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep protected Query doToQuery(QueryShardContext context) throws IOException { final MappedFieldType ft = context.fieldMapper(field); - if (ft instanceof FeatureFieldType) { - final FeatureFieldType fft = (FeatureFieldType) ft; - return scoreFunction.toQuery(FeatureMetaFieldMapper.NAME, field, fft.positiveScoreImpact()); + if (ft instanceof RankFeatureFieldType) { + final RankFeatureFieldType fft = (RankFeatureFieldType) ft; + return scoreFunction.toQuery(RankFeatureMetaFieldMapper.NAME, field, fft.positiveScoreImpact()); } else if (ft == null) { final int lastDotIndex = field.lastIndexOf('.'); 
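            // An unmapped name may still point at a single feature inside a
            // [rank_features] field (e.g. "tags.bar"): strip the last path
            // element and check whether the parent field is mapped.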
if (lastDotIndex != -1) { final String parentField = field.substring(0, lastDotIndex); final MappedFieldType parentFt = context.fieldMapper(parentField); - if (parentFt instanceof FeatureVectorFieldType) { + if (parentFt instanceof RankFeaturesFieldType) { return scoreFunction.toQuery(parentField, field.substring(lastDotIndex + 1), true); } } return new MatchNoDocsQuery(); // unmapped field } else { - throw new IllegalArgumentException("[feature] query only works on [feature] fields and features of [feature_vector] fields, " + - "not [" + ft.typeName() + "]"); + throw new IllegalArgumentException("[rank_feature] query only works on [rank_feature] fields and " + + "features of [rank_features] fields, not [" + ft.typeName() + "]"); } } @Override - protected boolean doEquals(FeatureQueryBuilder other) { + protected boolean doEquals(RankFeatureQueryBuilder other) { return Objects.equals(field, other.field) && Objects.equals(scoreFunction, other.scoreFunction); } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java similarity index 93% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java index 73879bb0225b9..4c2f0893bb01e 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldMapperTests.java @@ -38,7 +38,7 @@ import java.util.Arrays; import java.util.Collection; -public class FeatureFieldMapperTests extends ESSingleNodeTestCase { +public class RankFeatureFieldMapperTests extends ESSingleNodeTestCase { IndexService indexService; DocumentMapperParser parser; @@ -65,7 +65,7 @@ static int getFrequency(TokenStream tk) throws IOException { public void testDefaults() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .startObject("properties").startObject("field").field("type", "rank_feature").endObject().endObject() .endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -100,7 +100,7 @@ public void testDefaults() throws Exception { public void testNegativeScoreImpact() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "feature") + .startObject("properties").startObject("field").field("type", "rank_feature") .field("positive_score_impact", false).endObject().endObject() .endObject().endObject()); @@ -136,8 +136,8 @@ public void testNegativeScoreImpact() throws Exception { public void testRejectMultiValuedFields() throws MapperParsingException, IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "feature").endObject().startObject("foo") - .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .startObject("properties").startObject("field").field("type", "rank_feature").endObject().startObject("foo") + 
.startObject("properties").startObject("field").field("type", "rank_feature").endObject().endObject() .endObject().endObject().endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -151,7 +151,7 @@ public void testRejectMultiValuedFields() throws MapperParsingException, IOExcep .field("field", Arrays.asList(10, 20)) .endObject()), XContentType.JSON))); - assertEquals("[feature] fields do not support indexing multiple values for the same field [field] in the same document", + assertEquals("[rank_feature] fields do not support indexing multiple values for the same field [field] in the same document", e.getCause().getMessage()); e = expectThrows(MapperParsingException.class, @@ -168,7 +168,7 @@ public void testRejectMultiValuedFields() throws MapperParsingException, IOExcep .endArray() .endObject()), XContentType.JSON))); - assertEquals("[feature] fields do not support indexing multiple values for the same field [foo.field] in the same document", + assertEquals("[rank_feature] fields do not support indexing multiple values for the same field [foo.field] in the same document", e.getCause().getMessage()); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java similarity index 79% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java index 9debd0736602c..e901be8688fcd 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureFieldTypeTests.java @@ -21,11 +21,11 @@ import org.junit.Before; -public class FeatureFieldTypeTests extends FieldTypeTestCase { +public class RankFeatureFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { - return new FeatureFieldMapper.FeatureFieldType(); + return new RankFeatureFieldMapper.RankFeatureFieldType(); } @Before @@ -33,13 +33,13 @@ public void setupProperties() { addModifier(new Modifier("positive_score_impact", false) { @Override public void modify(MappedFieldType ft) { - FeatureFieldMapper.FeatureFieldType tft = (FeatureFieldMapper.FeatureFieldType)ft; + RankFeatureFieldMapper.RankFeatureFieldType tft = (RankFeatureFieldMapper.RankFeatureFieldType)ft; tft.setPositiveScoreImpact(tft.positiveScoreImpact() == false); } @Override public void normalizeOther(MappedFieldType other) { super.normalizeOther(other); - ((FeatureFieldMapper.FeatureFieldType) other).setPositiveScoreImpact(true); + ((RankFeatureFieldMapper.RankFeatureFieldType) other).setPositiveScoreImpact(true); } }); } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java similarity index 90% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java index 99697b1abaf58..f293e10f9e9ea 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java +++ 
b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldMapperTests.java @@ -29,7 +29,7 @@ import java.util.Collection; -public class FeatureMetaFieldMapperTests extends ESSingleNodeTestCase { +public class RankFeatureMetaFieldMapperTests extends ESSingleNodeTestCase { IndexService indexService; DocumentMapperParser parser; @@ -47,12 +47,12 @@ protected Collection> getPlugins() { public void testBasics() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .startObject("properties").startObject("field").field("type", "rank_feature").endObject().endObject() .endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - assertNotNull(mapper.metadataMapper(FeatureMetaFieldMapper.class)); + assertNotNull(mapper.metadataMapper(RankFeatureMetaFieldMapper.class)); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldTypeTests.java similarity index 86% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldTypeTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldTypeTests.java index e8d84ce7a0219..6045b24e607a9 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeatureMetaFieldTypeTests.java @@ -19,11 +19,11 @@ package org.elasticsearch.index.mapper; -public class FeatureVectorFieldTypeTests extends FieldTypeTestCase { +public class RankFeatureMetaFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { - return new FeatureVectorFieldMapper.FeatureVectorFieldType(); + return new RankFeatureMetaFieldMapper.RankFeatureMetaFieldType(); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java similarity index 87% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapperTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java index 852b6f6fc3c5c..720e5f2efa578 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -36,7 +36,7 @@ import java.util.Arrays; import java.util.Collection; -public class FeatureVectorFieldMapperTests extends ESSingleNodeTestCase { +public class RankFeaturesFieldMapperTests extends ESSingleNodeTestCase { IndexService indexService; DocumentMapperParser parser; @@ -54,7 +54,7 @@ protected Collection> getPlugins() { public void testDefaults() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "feature_vector").endObject().endObject() + 
.startObject("properties").startObject("field").field("type", "rank_features").endObject().endObject() .endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -79,15 +79,15 @@ public void testDefaults() throws Exception { FeatureField featureField2 = (FeatureField) fields[1]; assertThat(featureField2.stringValue(), Matchers.equalTo("bar")); - int freq1 = FeatureFieldMapperTests.getFrequency(featureField1.tokenStream(null, null)); - int freq2 = FeatureFieldMapperTests.getFrequency(featureField2.tokenStream(null, null)); + int freq1 = RankFeatureFieldMapperTests.getFrequency(featureField1.tokenStream(null, null)); + int freq2 = RankFeatureFieldMapperTests.getFrequency(featureField2.tokenStream(null, null)); assertTrue(freq1 < freq2); } public void testRejectMultiValuedFields() throws MapperParsingException, IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "feature_vector").endObject().startObject("foo") - .startObject("properties").startObject("field").field("type", "feature_vector").endObject().endObject() + .startObject("properties").startObject("field").field("type", "rank_features").endObject().startObject("foo") + .startObject("properties").startObject("field").field("type", "rank_features").endObject().endObject() .endObject().endObject().endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -103,7 +103,7 @@ public void testRejectMultiValuedFields() throws MapperParsingException, IOExcep .endObject() .endObject()), XContentType.JSON))); - assertEquals("[feature_vector] fields take hashes that map a feature to a strictly positive float, but got unexpected token " + + assertEquals("[rank_features] fields take hashes that map a feature to a strictly positive float, but got unexpected token " + "START_ARRAY", e.getCause().getMessage()); e = expectThrows(MapperParsingException.class, @@ -124,7 +124,7 @@ public void testRejectMultiValuedFields() throws MapperParsingException, IOExcep .endArray() .endObject()), XContentType.JSON))); - assertEquals("[feature_vector] fields do not support indexing multiple values for the same feature [foo.field.bar] in the same " + - "document", e.getCause().getMessage()); + assertEquals("[rank_features] fields do not support indexing multiple values for the same rank feature [foo.field.bar] in " + + "the same document", e.getCause().getMessage()); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java similarity index 87% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java index ef261573c9682..21a60b66f7683 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/RankFeaturesFieldTypeTests.java @@ -19,11 +19,11 @@ package org.elasticsearch.index.mapper; -public class FeatureMetaFieldTypeTests extends FieldTypeTestCase { +public class RankFeaturesFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { - return new 
FeatureMetaFieldMapper.FeatureMetaFieldType(); + return new RankFeaturesFieldMapper.RankFeaturesFieldType(); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java similarity index 82% rename from modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java index 40da4b53227af..aea37e2a8eeb7 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/RankFeatureQueryBuilderTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.mapper.MapperExtrasPlugin; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.FeatureQueryBuilder.ScoreFunction; +import org.elasticsearch.index.query.RankFeatureQueryBuilder.ScoreFunction; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -41,14 +41,14 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.either; -public class FeatureQueryBuilderTests extends AbstractQueryTestCase { +public class RankFeatureQueryBuilderTests extends AbstractQueryTestCase { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef("_doc", - "my_feature_field", "type=feature", - "my_negative_feature_field", "type=feature,positive_score_impact=false", - "my_feature_vector_field", "type=feature_vector"))), MapperService.MergeReason.MAPPING_UPDATE); + "my_feature_field", "type=rank_feature", + "my_negative_feature_field", "type=rank_feature,positive_score_impact=false", + "my_feature_vector_field", "type=rank_features"))), MapperService.MergeReason.MAPPING_UPDATE); } @Override @@ -57,7 +57,7 @@ protected Collection> getPlugins() { } @Override - protected FeatureQueryBuilder doCreateTestQueryBuilder() { + protected RankFeatureQueryBuilder doCreateTestQueryBuilder() { ScoreFunction function; boolean mayUseNegativeField = true; switch (random().nextInt(3)) { @@ -87,18 +87,18 @@ protected FeatureQueryBuilder doCreateTestQueryBuilder() { } final String field = randomFrom(fields); - return new FeatureQueryBuilder(field, function); + return new RankFeatureQueryBuilder(field, function); } @Override - protected void doAssertLuceneQuery(FeatureQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + protected void doAssertLuceneQuery(RankFeatureQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { Class expectedClass = FeatureField.newSaturationQuery("", "", 1, 1).getClass(); assertThat(query, either(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(expectedClass))); } public void testDefaultScoreFunction() throws IOException { String query = "{\n" + - " \"feature\" : {\n" + + " \"rank_feature\" : {\n" + " \"field\": \"my_feature_field\"\n" + " }\n" + "}"; @@ -108,17 +108,18 @@ public void testDefaultScoreFunction() throws IOException { public void testIllegalField() throws IOException { String query = "{\n" + - " \"feature\" : 
{\n" + + " \"rank_feature\" : {\n" + " \"field\": \"" + STRING_FIELD_NAME + "\"\n" + " }\n" + "}"; IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); - assertEquals("[feature] query only works on [feature] fields and features of [feature_vector] fields, not [text]", e.getMessage()); + assertEquals("[rank_feature] query only works on [rank_feature] fields and features of [rank_features] fields, not [text]", + e.getMessage()); } public void testIllegalCombination() throws IOException { String query = "{\n" + - " \"feature\" : {\n" + + " \"rank_feature\" : {\n" + " \"field\": \"my_negative_feature_field\",\n" + " \"log\" : {\n" + " \"scaling_factor\": 4.5\n" + diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml similarity index 89% rename from modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml rename to modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml index b52103822a76a..f02e64b7f5cec 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_feature/10_basic.yml @@ -1,7 +1,7 @@ setup: - skip: version: " - 6.99.99" - reason: "The feature field/query was introduced in 7.0.0" + reason: "The rank feature field/query was introduced in 7.0.0" - do: indices.create: @@ -13,9 +13,9 @@ setup: _doc: properties: pagerank: - type: feature + type: rank_feature url_length: - type: feature + type: rank_feature positive_score_impact: false - do: @@ -47,7 +47,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: pagerank log: scaling_factor: 3 @@ -69,7 +69,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: pagerank saturation: pivot: 20 @@ -91,7 +91,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: pagerank sigmoid: pivot: 20 @@ -115,7 +115,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: url_length log: scaling_factor: 3 @@ -128,7 +128,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: url_length saturation: pivot: 20 @@ -150,7 +150,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: url_length sigmoid: pivot: 20 diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature_vector/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml similarity index 89% rename from modules/mapper-extras/src/test/resources/rest-api-spec/test/feature_vector/10_basic.yml rename to modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml index ede0a0eed8708..66b585f6ed76d 100644 --- a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature_vector/10_basic.yml +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/rank_features/10_basic.yml @@ -1,7 +1,7 @@ setup: - skip: version: " - 6.99.99" - reason: "The feature_vector field was introduced in 7.0.0" + reason: "The rank_features field was introduced in 7.0.0" - do: indices.create: @@ -13,7 +13,7 @@ setup: _doc: properties: tags: - type: feature_vector + type: rank_features - do: index: @@ -46,7 +46,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + 
rank_feature: field: tags.bar log: scaling_factor: 3 @@ -68,7 +68,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: tags.bar saturation: pivot: 20 @@ -90,7 +90,7 @@ setup: rest_total_hits_as_int: true body: query: - feature: + rank_feature: field: tags.bar sigmoid: pivot: 20 From 3ccd4887556726f13ef07ad6ab1733a010018119 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Jan 2019 21:48:50 -0500 Subject: [PATCH 25/64] Remove testMappingsPropagatedToMasterNodeImmediately This test is obsolete since #31140 where an index request with dynamic mapping update no longer requires acking. Closes #37816 --- .../index/mapper/DynamicMappingIT.java | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 47ab7f71b51da..5655d741a9dd2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -34,8 +34,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; - public class DynamicMappingIT extends ESIntegTestCase { @Override @@ -75,21 +73,6 @@ private static void assertMappingsHaveField(GetMappingsResponse mappings, String assertTrue("Could not find [" + field + "] in " + typeMappingsMap.toString(), properties.containsKey(field)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37816") - public void testMappingsPropagatedToMasterNodeImmediately() throws IOException { - assertAcked(prepareCreate("index")); - - // works when the type has been dynamically created - client().prepareIndex("index", "type", "1").setSource("foo", 3).get(); - GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); - assertMappingsHaveField(mappings, "index", "type", "foo"); - - // works if the type already existed - client().prepareIndex("index", "type", "1").setSource("bar", "baz").get(); - mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); - assertMappingsHaveField(mappings, "index", "type", "bar"); - } - public void testConcurrentDynamicUpdates() throws Throwable { createIndex("index"); final Thread[] indexThreads = new Thread[32]; From 6a13a252e9d0eadc7bd099b3415d503beb3b7a7e Mon Sep 17 00:00:00 2001 From: Yuri Astrakhan Date: Thu, 24 Jan 2019 23:45:18 -0500 Subject: [PATCH 26/64] Abstract GeoHashGridAggregatorFactory creation, renamed geohash -> hash (#37836) * Delegate `new GeoHashGridAggregatorFactory(...)` inside the `GeoGridAggregationBuilder` to the child classes. 
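  For example, the geohash builder now supplies its own factory (excerpted
  from the diff below):

    @Override
    protected ValuesSourceAggregatorFactory createFactory(
            String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize,
            SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder,
            Map metaData) throws IOException {
        return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize,
            context, parent, subFactoriesBuilder, metaData);
    }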
* Rename all `geohash...` to `hash...` --- .../bucket/geogrid/CellIdSource.java | 2 +- .../geogrid/GeoGridAggregationBuilder.java | 10 ++++++- .../bucket/geogrid/GeoGridAggregator.java | 16 ++++++------ .../GeoHashGridAggregationBuilder.java | 15 +++++++++++ .../bucket/geogrid/InternalGeoGrid.java | 4 +-- .../bucket/geogrid/InternalGeoGridBucket.java | 26 +++++++++---------- .../bucket/geogrid/InternalGeoHashGrid.java | 2 +- .../geogrid/InternalGeoHashGridBucket.java | 12 ++++----- .../bucket/geogrid/ParsedGeoGridBucket.java | 4 +-- .../geogrid/ParsedGeoHashGridBucket.java | 6 ++--- .../bucket/geogrid/GeoHashGridTests.java | 4 +-- 11 files changed, 62 insertions(+), 39 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java index fce0747b3dc60..0cc7734ad7685 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/CellIdSource.java @@ -39,7 +39,7 @@ class CellIdSource extends ValuesSource.Numeric { CellIdSource(GeoPoint valuesSource, int precision, GeoPointLongEncoder encoder) { this.valuesSource = valuesSource; - //different GeoPoints could map to the same or different geohash cells. + //different GeoPoints could map to the same or different hashing cells. this.precision = precision; this.encoder = encoder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index c90c77b91be37..76fa6621d01cc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -105,6 +105,14 @@ protected void innerWriteTo(StreamOutput out) throws IOException { */ public abstract GeoGridAggregationBuilder precision(int precision); + /** + * Creates a new instance of the {@link ValuesSourceAggregatorFactory}-derived class specific to the geo aggregation. 
+ */ + protected abstract ValuesSourceAggregatorFactory createFactory( + String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, + SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder, Map metaData + ) throws IOException; + public int precision() { return precision; } @@ -157,7 +165,7 @@ public int shardSize() { if (shardSize < requiredSize) { shardSize = requiredSize; } - return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize, context, parent, + return createFactory(name, config, precision, requiredSize, shardSize, context, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java index a02bb4c6c1a55..4935b6c6ba7d2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -39,7 +39,7 @@ import java.util.Map; /** - * Aggregates data expressed as GeoHash longs (for efficiency's sake) but formats results as Geohash strings. + * Aggregates data expressed as longs (for efficiency's sake) but formats results as aggregation-specific strings. */ public abstract class GeoGridAggregator extends BucketsAggregator { @@ -103,20 +103,20 @@ static class OrdinalBucket extends InternalGeoGridBucket { InternalGeoGridBucket sourceBucket; // used to keep track of appropriate getKeyAsString method OrdinalBucket(InternalGeoGridBucket sourceBucket) { - super(sourceBucket.geohashAsLong, sourceBucket.docCount, sourceBucket.aggregations); + super(sourceBucket.hashAsLong, sourceBucket.docCount, sourceBucket.aggregations); this.sourceBucket = sourceBucket; } - void geohashAsLong(long geohashAsLong) { - this.geohashAsLong = geohashAsLong; - this.sourceBucket.geohashAsLong = geohashAsLong; + void hashAsLong(long hashAsLong) { + this.hashAsLong = hashAsLong; + this.sourceBucket.hashAsLong = hashAsLong; } @Override - InternalGeoGridBucket buildBucket(InternalGeoGridBucket bucket, long geoHashAsLong, long docCount, + InternalGeoGridBucket buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, InternalAggregations aggregations) { OrdinalBucket ordBucket = new OrdinalBucket(bucket); - ordBucket.geohashAsLong = geoHashAsLong; + ordBucket.hashAsLong = hashAsLong; ordBucket.docCount = docCount; ordBucket.aggregations = aggregations; // this is done because the aggregator may be rebuilt from cache (non OrdinalBucket), @@ -163,7 +163,7 @@ public InternalGeoGrid buildAggregation(long owningBucketOrdinal) throws IOExcep // need a special function to keep the source bucket // up-to-date so it can get the appropriate key - spare.geohashAsLong(bucketOrds.get(i)); + spare.hashAsLong(bucketOrds.get(i)); spare.docCount = bucketDocCount(i); spare.bucketOrd = i; spare = (OrdinalBucket) ordered.insertWithOverflow(spare); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index 416634011de0e..4e560e681c796 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -25,6 +25,11 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -53,6 +58,16 @@ public GeoGridAggregationBuilder precision(int precision) { return this; } + @Override + protected ValuesSourceAggregatorFactory createFactory( + String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, + SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData + ) throws IOException { + return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize, context, parent, + subFactoriesBuilder, metaData); + } + private GeoHashGridAggregationBuilder(GeoHashGridAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, Map metaData) { super(clone, factoriesBuilder, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index 9608ac914c0aa..36908f51d70de 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -89,10 +89,10 @@ public InternalGeoGrid doReduce(List aggregations, ReduceCo } for (Object obj : grid.buckets) { B bucket = (B) obj; - List existingBuckets = buckets.get(bucket.geohashAsLong()); + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new ArrayList<>(aggregations.size()); - buckets.put(bucket.geohashAsLong(), existingBuckets); + buckets.put(bucket.hashAsLong(), existingBuckets); } existingBuckets.add(bucket); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index 2184ed76e5071..ed699e5e3edb2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -35,37 +35,37 @@ public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, Comparable { - protected long geohashAsLong; + protected long hashAsLong; protected long docCount; protected InternalAggregations aggregations; - public InternalGeoGridBucket(long geohashAsLong, long docCount, InternalAggregations aggregations) { + public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; - this.geohashAsLong = geohashAsLong; + this.hashAsLong = hashAsLong; } /** * Read from a stream. 
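 * (Wire format: the cell id as a long, then the doc count as a vlong, then
 * the serialized sub-aggregations.)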
*/ public InternalGeoGridBucket(StreamInput in) throws IOException { - geohashAsLong = in.readLong(); + hashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readAggregations(in); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeLong(geohashAsLong); + out.writeLong(hashAsLong); out.writeVLong(docCount); aggregations.writeTo(out); } - abstract B buildBucket(InternalGeoGridBucket bucket, long geoHashAsLong, long docCount, InternalAggregations aggregations); + abstract B buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, InternalAggregations aggregations); - long geohashAsLong() { - return geohashAsLong; + long hashAsLong() { + return hashAsLong; } @Override @@ -80,10 +80,10 @@ public Aggregations getAggregations() { @Override public int compareTo(InternalGeoGridBucket other) { - if (this.geohashAsLong > other.geohashAsLong) { + if (this.hashAsLong > other.hashAsLong) { return 1; } - if (this.geohashAsLong < other.geohashAsLong) { + if (this.hashAsLong < other.hashAsLong) { return -1; } return 0; @@ -97,7 +97,7 @@ public B reduce(List buckets, InternalAggregation.ReduceContext context) { aggregationsList.add(bucket.aggregations); } final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return buildBucket(this, geohashAsLong, docCount, aggs); + return buildBucket(this, hashAsLong, docCount, aggs); } @Override @@ -115,14 +115,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; - return geohashAsLong == bucket.geohashAsLong && + return hashAsLong == bucket.hashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); } @Override public int hashCode() { - return Objects.hash(geohashAsLong, docCount, aggregations); + return Objects.hash(hashAsLong, docCount, aggregations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 57eacdcb4ec7f..0c28788666249 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -51,7 +51,7 @@ public InternalGeoGrid create(List buckets) { @Override public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { - return new InternalGeoHashGridBucket(prototype.geohashAsLong, prototype.docCount, aggregations); + return new InternalGeoHashGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index ed4df7c0d0d86..1d77a54523b11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -26,8 +26,8 @@ import java.io.IOException; public class InternalGeoHashGridBucket extends InternalGeoGridBucket { - InternalGeoHashGridBucket(long geohashAsLong, long docCount, InternalAggregations 
aggregations) { - super(geohashAsLong, docCount, aggregations); + InternalGeoHashGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + super(hashAsLong, docCount, aggregations); } /** @@ -38,18 +38,18 @@ public InternalGeoHashGridBucket(StreamInput in) throws IOException { } @Override - InternalGeoHashGridBucket buildBucket(InternalGeoGridBucket bucket, long geoHashAsLong, long docCount, + InternalGeoHashGridBucket buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, InternalAggregations aggregations) { - return new InternalGeoHashGridBucket(geoHashAsLong, docCount, aggregations); + return new InternalGeoHashGridBucket(hashAsLong, docCount, aggregations); } @Override public String getKeyAsString() { - return GeoHashUtils.stringEncode(geohashAsLong); + return GeoHashUtils.stringEncode(hashAsLong); } @Override public GeoPoint getKey() { - return GeoPoint.fromGeohash(geohashAsLong); + return GeoPoint.fromGeohash(hashAsLong); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 493b77f547f75..4e71119fbeee5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -26,10 +26,10 @@ public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { - protected String geohashAsString; + protected String hashAsString; @Override protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { - return builder.field(Aggregation.CommonFields.KEY.getPreferredName(), geohashAsString); + return builder.field(Aggregation.CommonFields.KEY.getPreferredName(), hashAsString); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index fe7846692a1f0..f74ad70f64e53 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -27,15 +27,15 @@ public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { @Override public GeoPoint getKey() { - return GeoPoint.fromGeohash(geohashAsString); + return GeoPoint.fromGeohash(hashAsString); } @Override public String getKeyAsString() { - return geohashAsString; + return hashAsString; } static ParsedGeoHashGridBucket fromXContent(XContentParser parser) throws IOException { - return parseXContent(parser, false, ParsedGeoHashGridBucket::new, (p, bucket) -> bucket.geohashAsString = p.textOrNull()); + return parseXContent(parser, false, ParsedGeoHashGridBucket::new, (p, bucket) -> bucket.hashAsString = p.textOrNull()); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java index 8c291e69fabe1..02c8016556220 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -72,9 +72,9 @@ protected 
void assertReduced(InternalGeoHashGrid reduced, List> map = new HashMap<>(); for (InternalGeoHashGrid input : inputs) { for (InternalGeoGridBucket bucket : input.getBuckets()) { - List buckets = map.get(bucket.geohashAsLong); + List buckets = map.get(bucket.hashAsLong); if (buckets == null) { - map.put(bucket.geohashAsLong, buckets = new ArrayList<>()); + map.put(bucket.hashAsLong, buckets = new ArrayList<>()); } buckets.add(bucket); } From 03690d12b24d742e26149ef09a881f2f37375865 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Fri, 25 Jan 2019 15:46:39 +1100 Subject: [PATCH 27/64] Remove TLS 1.0 as a default SSL protocol (#37512) The default value for ssl.supported_protocols no longer includes TLSv1 as this is an old protocol with known security issues. Administrators can enable TLSv1.0 support by configuring the appropriate `ssl.supported_protocols` setting, for example: xpack.security.http.ssl.supported_protocols: ["TLSv1.2","TLSv1.1","TLSv1"] Relates: #36021 --- .../migration/migrate_7_0/settings.asciidoc | 13 +++++++++++++ docs/reference/settings/security-settings.asciidoc | 7 +++---- docs/reference/settings/ssl-settings.asciidoc | 4 ++-- .../common/ssl/SslConfigurationLoader.java | 2 +- .../org/elasticsearch/xpack/core/XPackSettings.java | 2 +- .../xpack/core/ssl/SSLConfigurationTests.java | 4 ++++ 6 files changed, 24 insertions(+), 8 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index a005f80c1663a..6e9f7451e094f 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -132,6 +132,19 @@ fallback to a default configuration when using TLS. Each component (realm, trans http client, etc) must now be configured with their own settings for TLS if it is being used. +[float] +[[tls-v1-removed]] +==== TLS v1.0 disabled + +TLS version 1.0 is now disabled by default as it suffers from +https://www.owasp.org/index.php/Transport_Layer_Protection_Cheat_Sheet#Rule_-_Only_Support_Strong_Protocols[known security issues]. +The default protocols are now TLSv1.2 and TLSv1.1. +You can enable TLS v1.0 by configuring the relevant `ssl.supported_protocols` setting to include `"TLSv1"`, for example: +[source,yaml] +-------------------------------------------------- +xpack.security.http.ssl.supported_protocols: [ "TLSv1.2", "TLSv1.1", "TLSv1" ] +-------------------------------------------------- + [float] [[watcher-notifications-account-settings]] ==== Watcher notifications account settings diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 8a7144c0a1388..16ce60e986b93 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -480,7 +480,7 @@ and `full`. Defaults to `full`. See <> for an explanation of these values. `ssl.supported_protocols`:: -Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2,TLSv1.1,TLSv1`. +Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2,TLSv1.1`. `ssl.cipher_suites`:: Specifies the cipher suites that should be supported when communicating with the LDAP server. @@ -724,7 +724,7 @@ and `full`. Defaults to `full`. See <> for an explanation of these values. `ssl.supported_protocols`:: -Supported protocols for TLS/SSL (with versions). Defaults to `TLSv1.2, TLSv1.1, TLSv1`. +Supported protocols for TLS/SSL (with versions). 
Defaults to `TLSv1.2, TLSv1.1`. `ssl.cipher_suites`:: Specifies the cipher suites that should be supported when communicating with the Active Directory server. @@ -1206,8 +1206,7 @@ settings. For more information, see `ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, -`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`, -`TLSv1`. +`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`. + -- NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hello` diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index 1ff9ebc03ae8d..a04f5581f2abd 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -11,8 +11,8 @@ endif::server[] +{ssl-prefix}.ssl.supported_protocols+:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, -`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`, -`TLSv1`. +`SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. Defaults to `TLSv1.2`, `TLSv1.1`. + ifdef::server[] +{ssl-prefix}.ssl.client_authentication+:: diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index 186d20b1ea858..efe87f7c30322 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -68,7 +68,7 @@ */ public abstract class SslConfigurationLoader { - static final List DEFAULT_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1"); + static final List DEFAULT_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1"); static final List DEFAULT_CIPHERS = loadDefaultCiphers(); private static final char[] EMPTY_PASSWORD = new char[0]; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 22bc6f4b29482..6a2a693d3b15e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -154,7 +154,7 @@ private XPackSettings() { } }, Setting.Property.NodeScope); - public static final List DEFAULT_SUPPORTED_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1"); + public static final List DEFAULT_SUPPORTED_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1"); public static final SSLClientAuth CLIENT_AUTH_DEFAULT = SSLClientAuth.REQUIRED; public static final SSLClientAuth HTTP_CLIENT_AUTH_DEFAULT = SSLClientAuth.NONE; public static final VerificationMode VERIFICATION_MODE_DEFAULT = VerificationMode.FULL; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationTests.java index 74ae2ae55c126..b19044566b2fd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackSettings; import 
org.elasticsearch.xpack.core.ssl.TrustConfig.CombiningTrustConfig; import javax.net.ssl.KeyManager; @@ -22,6 +23,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; @@ -35,6 +37,8 @@ public void testThatSSLConfigurationHasCorrectDefaults() { assertThat(globalConfig.keyConfig(), sameInstance(KeyConfig.NONE)); assertThat(globalConfig.trustConfig(), is(not((globalConfig.keyConfig())))); assertThat(globalConfig.trustConfig(), instanceOf(DefaultJDKTrustConfig.class)); + assertThat(globalConfig.supportedProtocols(), equalTo(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS)); + assertThat(globalConfig.supportedProtocols(), not(hasItem("TLSv1"))); } public void testThatOnlyKeystoreInSettingsSetsTruststoreSettings() { From df8fa9781ee4dc029065a7daa4d29ab6a9e5a200 Mon Sep 17 00:00:00 2001 From: Ricardo Ferreira Date: Fri, 25 Jan 2019 07:35:19 +0000 Subject: [PATCH 28/64] Remove AbstractComponent (#35898) TransportAction and BaseRestHandler no longer extend AbstractComponent. AbstractComponent itself no longer has any usages, so it has been deleted. Closes #34488 --- .../action/support/TransportAction.java | 10 ++++-- .../common/component/AbstractComponent.java | 36 ------------------- .../elasticsearch/rest/BaseRestHandler.java | 11 ++++-- 3 files changed, 15 insertions(+), 42 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index 22740ea6bf75d..fcb4905b95914 100644 --- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -18,24 +18,28 @@ */ package org.elasticsearch.action.support; - +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskListener; import org.elasticsearch.tasks.TaskManager; import java.util.concurrent.atomic.AtomicInteger; -public abstract class TransportAction extends AbstractComponent { +public abstract class TransportAction { protected final String actionName; private final ActionFilter[] filters; protected final TaskManager taskManager; + /** + * @deprecated declare your own logger. + */ + @Deprecated + protected Logger logger = LogManager.getLogger(getClass()); protected TransportAction(String actionName, ActionFilters actionFilters, TaskManager taskManager) { this.actionName = actionName; diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java deleted file mode 100644 index 1e0310c024736..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements.
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.component; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -/** - * @deprecated declare your own logger - */ -@Deprecated -public abstract class AbstractComponent { - - protected final Logger logger; - - public AbstractComponent() { - this.logger = LogManager.getLogger(getClass()); - } -} diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 36189381c5b22..9e86b3a6f9497 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -18,13 +18,13 @@ */ package org.elasticsearch.rest; - +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -51,12 +51,17 @@ * are copied, but a selected few. It is possible to control what headers are copied over by returning them in * {@link ActionPlugin#getRestHeaders()}. */ -public abstract class BaseRestHandler extends AbstractComponent implements RestHandler { +public abstract class BaseRestHandler implements RestHandler { public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting("rest.action.multi.allow_explicit_index", true, Property.NodeScope); private final LongAdder usageCount = new LongAdder(); + /** + * @deprecated declare your own logger. + */ + @Deprecated + protected Logger logger = LogManager.getLogger(getClass()); /** * Parameter that controls whether certain REST apis should include type names in their requests or responses. From 1151f3b3ffaa3c3d4024f3b19b816a3ae916d00a Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 25 Jan 2019 08:53:18 +0100 Subject: [PATCH 29/64] Fail with a dedicated exception if remote connection is missing or (#37767) or connectivity to the remote connection is failing. 
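In practical terms, callers that previously had to compare exception messages on an IllegalArgumentException can now catch the dedicated type. A minimal sketch of the resulting calling pattern, not part of this patch: the `remoteClusterService`, `threadPool` and `logger` references and the `leader_cluster` alias are assumed to exist in the surrounding component, while `getRemoteClusterClient` and `NoSuchRemoteClusterException` are exactly the APIs shown in the diff below.

    import org.elasticsearch.client.Client;
    import org.elasticsearch.transport.NoSuchRemoteClusterException;

    // Sketch only: remoteClusterService, threadPool and logger are assumed
    // to be available in the enclosing class.
    void pingLeaderCluster() {
        try {
            // Resolving a client for a remote cluster alias now throws a typed
            // exception when the alias is unknown or its connection is gone.
            Client remote = remoteClusterService.getRemoteClusterClient(threadPool, "leader_cluster");
            remote.admin().cluster().prepareState().get();
        } catch (NoSuchRemoteClusterException e) {
            // Branch on the exception type instead of parsing the message text.
            logger.info("remote cluster [leader_cluster] is unavailable", e);
        }
    }

Since NoSuchRemoteClusterException extends ResourceNotFoundException, existing handlers for resource-not-found errors keep working unchanged.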
Relates to #37681 --- .../elasticsearch/ElasticsearchException.java | 4 +- .../NoSuchRemoteClusterException.java | 42 +++++++++++++++++++ .../transport/RemoteClusterConnection.java | 2 +- .../transport/RemoteClusterService.java | 6 +-- .../ExceptionSerializationTests.java | 2 + .../RemoteClusterConnectionTests.java | 6 +-- .../transport/RemoteClusterServiceTests.java | 2 +- .../ccr/action/AutoFollowCoordinator.java | 5 +-- .../xpack/ccr/action/ShardFollowNodeTask.java | 10 ++--- .../xpack/ccr/IndexFollowingIT.java | 9 ++-- 10 files changed, 64 insertions(+), 24 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/transport/NoSuchRemoteClusterException.java diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 1fc5ef474ee04..1e165217c2014 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1010,7 +1010,9 @@ private enum ElasticsearchExceptionHandle { COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, - org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_7_0_0); + org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_7_0_0), + NO_SUCH_REMOTE_CLUSTER_EXCEPTION(org.elasticsearch.transport.NoSuchRemoteClusterException.class, + org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_7_0_0); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/transport/NoSuchRemoteClusterException.java b/server/src/main/java/org/elasticsearch/transport/NoSuchRemoteClusterException.java new file mode 100644 index 0000000000000..695c04a98ff3d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/NoSuchRemoteClusterException.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * An exception that remote cluster is missing or + * connectivity to the remote connection is failing + */ +public final class NoSuchRemoteClusterException extends ResourceNotFoundException { + + NoSuchRemoteClusterException(String clusterName) { + //No node available for cluster + super("no such remote cluster: [" + clusterName + "]"); + } + + public NoSuchRemoteClusterException(StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index d7e3de92e4028..57820a8ca48a9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -659,7 +659,7 @@ public synchronized DiscoveryNode getAny() { if (currentIterator.hasNext()) { return currentIterator.next(); } else { - throw new IllegalStateException("No node available for cluster: " + clusterAlias); + throw new NoSuchRemoteClusterException(clusterAlias); } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 7d19b2eebcb1d..009ee48dd8a99 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -337,7 +337,7 @@ public Transport.Connection getConnection(String cluster) { RemoteClusterConnection getRemoteClusterConnection(String cluster) { RemoteClusterConnection connection = remoteClusters.get(cluster); if (connection == null) { - throw new IllegalArgumentException("no such remote cluster: " + cluster); + throw new NoSuchRemoteClusterException(cluster); } return connection; } @@ -415,7 +415,7 @@ public void collectNodes(Set clusters, ActionListener remoteClusters = this.remoteClusters; for (String cluster : clusters) { if (remoteClusters.containsKey(cluster) == false) { - listener.onFailure(new IllegalArgumentException("no such remote cluster: [" + cluster + "]")); + listener.onFailure(new NoSuchRemoteClusterException(cluster)); return; } } @@ -456,7 +456,7 @@ public void onFailure(Exception e) { */ public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) { if (transportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) { - throw new IllegalArgumentException("unknown cluster alias [" + clusterAlias + "]"); + throw new NoSuchRemoteClusterException(clusterAlias); } return new RemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias); } diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index b4ce907d6f7b3..97ce870d1bada 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -85,6 +85,7 @@ import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.ActionTransportException; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NoSuchRemoteClusterException; 
import org.elasticsearch.transport.TcpTransport; import java.io.EOFException; @@ -809,6 +810,7 @@ public void testIds() { ids.put(149, MultiBucketConsumerService.TooManyBucketsException.class); ids.put(150, CoordinationStateRejectedException.class); ids.put(151, SnapshotInProgressException.class); + ids.put(152, NoSuchRemoteClusterException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 308d330d54f61..3ec2506da244e 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -1000,10 +1000,8 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted try { DiscoveryNode node = connection.getAnyConnectedNode(); assertNotNull(node); - } catch (IllegalStateException e) { - if (e.getMessage().startsWith("No node available for cluster:") == false) { - throw e; - } + } catch (NoSuchRemoteClusterException e) { + // ignore, this is an expected exception } } } catch (Exception ex) { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 60f3ece86bcbe..2407106273f3f 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -657,7 +657,7 @@ public void onFailure(Exception e) { }); failLatch.await(); assertNotNull(ex.get()); - assertTrue(ex.get() instanceof IllegalArgumentException); + assertTrue(ex.get() instanceof NoSuchRemoteClusterException); assertEquals("no such remote cluster: [no such cluster]", ex.get().getMessage()); } { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 19d2b9ce4797e..82153e77fc35e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrSettings; @@ -374,9 +375,7 @@ void start() { autoFollowIndices(autoFollowMetadata, clusterState, remoteClusterState, patterns); } else { assert remoteError != null; - String expectedErrorMessage = "unknown cluster alias [" + remoteCluster + "]"; - if (remoteError instanceof IllegalArgumentException && - expectedErrorMessage.equals(remoteError.getMessage())) { + if (remoteError instanceof NoSuchRemoteClusterException) { LOGGER.info("AutoFollower for cluster [{}] has stopped, because remote connection is gone", remoteCluster); remoteClusterConnectionMissing = true; return; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 
d40fdd551be47..7d8e1fa884757 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -30,6 +30,7 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -451,10 +452,6 @@ static boolean shouldRetry(String remoteCluster, Exception e) { return true; } - // This is thrown when using a Client and its remote cluster alias went MIA - String noSuchRemoteClusterMessage = "no such remote cluster: " + remoteCluster; - // This is thrown when creating a Client and the remote cluster does not exist: - String unknownClusterMessage = "unknown cluster alias [" + remoteCluster + "]"; final Throwable actual = ExceptionsHelper.unwrapCause(e); return actual instanceof ShardNotFoundException || actual instanceof IllegalIndexShardStateException || @@ -466,9 +463,8 @@ static boolean shouldRetry(String remoteCluster, Exception e) { actual instanceof IndexClosedException || // If follow index is closed actual instanceof ConnectTransportException || actual instanceof NodeClosedException || - (actual.getMessage() != null && actual.getMessage().contains("TransportService is closed")) || - (actual instanceof IllegalArgumentException && (noSuchRemoteClusterMessage.equals(actual.getMessage()) || - unknownClusterMessage.equals(actual.getMessage()))); + actual instanceof NoSuchRemoteClusterException || + (actual.getMessage() != null && actual.getMessage().contains("TransportService is closed")); } // These methods are protected for testing purposes: diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index e811480e1b1a0..648c295efa817 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -49,6 +49,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -574,16 +575,16 @@ public void testUnknownClusterAlias() throws Exception { ensureLeaderGreen("index1"); PutFollowAction.Request followRequest = putFollow("index1", "index2"); followRequest.setRemoteCluster("another_cluster"); - Exception e = expectThrows(IllegalArgumentException.class, + Exception e = expectThrows(NoSuchRemoteClusterException.class, () -> followerClient().execute(PutFollowAction.INSTANCE, followRequest).actionGet()); - assertThat(e.getMessage(), equalTo("unknown cluster alias [another_cluster]")); + assertThat(e.getMessage(), equalTo("no such remote cluster: [another_cluster]")); PutAutoFollowPatternAction.Request putAutoFollowRequest = new PutAutoFollowPatternAction.Request(); putAutoFollowRequest.setName("name"); putAutoFollowRequest.setRemoteCluster("another_cluster"); 
putAutoFollowRequest.setLeaderIndexPatterns(Collections.singletonList("logs-*")); - e = expectThrows(IllegalArgumentException.class, + e = expectThrows(NoSuchRemoteClusterException.class, () -> followerClient().execute(PutAutoFollowPatternAction.INSTANCE, putAutoFollowRequest).actionGet()); - assertThat(e.getMessage(), equalTo("unknown cluster alias [another_cluster]")); + assertThat(e.getMessage(), equalTo("no such remote cluster: [another_cluster]")); } public void testLeaderIndexRed() throws Exception { From 5a9dadb3ff287893446d39bd9865269583cb8971 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 25 Jan 2019 09:18:42 +0100 Subject: [PATCH 30/64] Changed versionAdded now that #37767 is backported --- .../src/main/java/org/elasticsearch/ElasticsearchException.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 1e165217c2014..ebfb2e270be80 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1012,7 +1012,7 @@ private enum ElasticsearchExceptionHandle { SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_7_0_0), NO_SUCH_REMOTE_CLUSTER_EXCEPTION(org.elasticsearch.transport.NoSuchRemoteClusterException.class, - org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_7_0_0); + org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_6_7_0); final Class exceptionClass; final CheckedFunction constructor; From 7692b607b930f632d6ea5e5e0eda55fcb57cfc45 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Jan 2019 09:38:29 +0100 Subject: [PATCH 31/64] Fix ClusterDisruptionIT#testAckedIndexing (#37853) * Stop the indexer threads before logging the list of exceptions * In the broken case, the finally block could iterate the exception list while the threads had not yet shut down; use `CopyOnWriteArrayList` so that iteration is concurrency-safe * Closes #37810 --- .../discovery/ClusterDisruptionIT.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index f1e78fd3c6ae6..d94c34c7b33eb 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -50,6 +50,7 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; @@ -84,7 +85,6 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37810") public void testAckedIndexing() throws Exception { final int seconds = !(TEST_NIGHTLY && rarely()) ?
1 : 5; @@ -109,7 +109,7 @@ public void testAckedIndexing() throws Exception { List semaphores = new ArrayList<>(nodes.size()); final AtomicInteger idGenerator = new AtomicInteger(0); final AtomicReference countDownLatchRef = new AtomicReference<>(); - final List exceptedExceptions = Collections.synchronizedList(new ArrayList()); + final List exceptedExceptions = new CopyOnWriteArrayList<>(); logger.info("starting indexers"); try { @@ -215,6 +215,12 @@ public void testAckedIndexing() throws Exception { logger.info("done validating (iteration [{}])", iter); } } finally { + logger.info("shutting down indexers"); + stop.set(true); + for (Thread indexer : indexers) { + indexer.interrupt(); + indexer.join(60000); + } if (exceptedExceptions.size() > 0) { StringBuilder sb = new StringBuilder(); for (Exception e : exceptedExceptions) { @@ -222,12 +228,6 @@ public void testAckedIndexing() throws Exception { } logger.debug("Indexing exceptions during disruption: {}", sb); } - logger.info("shutting down indexers"); - stop.set(true); - for (Thread indexer : indexers) { - indexer.interrupt(); - indexer.join(60000); - } } } From 170d7413d099a624d6d7ba7448ecd1b228d0f64c Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 25 Jan 2019 09:29:53 +0000 Subject: [PATCH 32/64] [ML] Fix gaps in reserved roles tests (#37772) Some of our newer endpoints and indices were missing from the tests. --- .../authz/store/ReservedRolesStoreTests.java | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 7f4dbcee4ed5d..dc077a17e5c59 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -44,14 +44,21 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; +import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; +import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; +import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction; import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; +import org.elasticsearch.xpack.core.ml.action.FindFileStructureAction; import org.elasticsearch.xpack.core.ml.action.FlushJobAction; +import org.elasticsearch.xpack.core.ml.action.ForecastJobAction; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; +import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; import org.elasticsearch.xpack.core.ml.action.GetCategoriesAction; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; @@ -60,24 +67,32 @@ import org.elasticsearch.xpack.core.ml.action.GetJobsAction; import 
org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; +import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; import org.elasticsearch.xpack.core.ml.action.KillProcessAction; +import org.elasticsearch.xpack.core.ml.action.MlInfoAction; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.PersistJobAction; +import org.elasticsearch.xpack.core.ml.action.PostCalendarEventsAction; import org.elasticsearch.xpack.core.ml.action.PostDataAction; import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction; import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; +import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; @@ -765,14 +780,21 @@ public void testMachineLearningAdminRole() { Role role = Role.builder(roleDescriptor, null).build(); assertThat(role.cluster().check(CloseJobAction.NAME, request), is(true)); + assertThat(role.cluster().check(DeleteCalendarAction.NAME, request), is(true)); + assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request), is(true)); assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request), is(true)); assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request), is(true)); assertThat(role.cluster().check(DeleteFilterAction.NAME, request), is(true)); + assertThat(role.cluster().check(DeleteForecastAction.NAME, request), is(true)); assertThat(role.cluster().check(DeleteJobAction.NAME, request), is(true)); assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request), is(true)); assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request), is(false)); // internal use only + assertThat(role.cluster().check(FindFileStructureAction.NAME, request), is(true)); assertThat(role.cluster().check(FlushJobAction.NAME, request), is(true)); + assertThat(role.cluster().check(ForecastJobAction.NAME, request), is(true)); assertThat(role.cluster().check(GetBucketsAction.NAME, request), is(true)); + assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request), is(true)); + assertThat(role.cluster().check(GetCalendarsAction.NAME, request), is(true)); 
assertThat(role.cluster().check(GetCategoriesAction.NAME, request), is(true)); assertThat(role.cluster().check(GetDatafeedsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request), is(true)); @@ -781,19 +803,26 @@ public void testMachineLearningAdminRole() { assertThat(role.cluster().check(GetJobsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetJobsStatsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request), is(true)); + assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetRecordsAction.NAME, request), is(true)); assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request), is(false)); // internal use only assertThat(role.cluster().check(KillProcessAction.NAME, request), is(false)); // internal use only + assertThat(role.cluster().check(MlInfoAction.NAME, request), is(true)); assertThat(role.cluster().check(OpenJobAction.NAME, request), is(true)); + assertThat(role.cluster().check(PersistJobAction.NAME, request), is(true)); + assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request), is(true)); assertThat(role.cluster().check(PostDataAction.NAME, request), is(true)); assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request), is(true)); + assertThat(role.cluster().check(PutCalendarAction.NAME, request), is(true)); assertThat(role.cluster().check(PutDatafeedAction.NAME, request), is(true)); assertThat(role.cluster().check(PutFilterAction.NAME, request), is(true)); assertThat(role.cluster().check(PutJobAction.NAME, request), is(true)); assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request), is(true)); assertThat(role.cluster().check(StartDatafeedAction.NAME, request), is(true)); assertThat(role.cluster().check(StopDatafeedAction.NAME, request), is(true)); + assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request), is(true)); assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request), is(true)); + assertThat(role.cluster().check(UpdateFilterAction.NAME, request), is(true)); assertThat(role.cluster().check(UpdateJobAction.NAME, request), is(true)); assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request), is(true)); assertThat(role.cluster().check(UpdateProcessAction.NAME, request), is(false)); // internal use only @@ -802,10 +831,12 @@ public void testMachineLearningAdminRole() { assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertNoAccessAllowed(role, "foo"); + assertNoAccessAllowed(role, AnomalyDetectorsIndexFields.CONFIG_INDEX); // internal use only assertOnlyReadAllowed(role, MlMetaIndex.INDEX_NAME); assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX); assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT); assertOnlyReadAllowed(role, AuditorField.NOTIFICATIONS_INDEX); + assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } @@ -819,14 +850,21 @@ public void testMachineLearningUserRole() { Role role = Role.builder(roleDescriptor, null).build(); assertThat(role.cluster().check(CloseJobAction.NAME, request), is(false)); + assertThat(role.cluster().check(DeleteCalendarAction.NAME, request), is(false)); + assertThat(role.cluster().check(DeleteCalendarEventAction.NAME, request), is(false)); 
assertThat(role.cluster().check(DeleteDatafeedAction.NAME, request), is(false)); assertThat(role.cluster().check(DeleteExpiredDataAction.NAME, request), is(false)); assertThat(role.cluster().check(DeleteFilterAction.NAME, request), is(false)); + assertThat(role.cluster().check(DeleteForecastAction.NAME, request), is(false)); assertThat(role.cluster().check(DeleteJobAction.NAME, request), is(false)); assertThat(role.cluster().check(DeleteModelSnapshotAction.NAME, request), is(false)); assertThat(role.cluster().check(FinalizeJobExecutionAction.NAME, request), is(false)); + assertThat(role.cluster().check(FindFileStructureAction.NAME, request), is(true)); assertThat(role.cluster().check(FlushJobAction.NAME, request), is(false)); + assertThat(role.cluster().check(ForecastJobAction.NAME, request), is(false)); assertThat(role.cluster().check(GetBucketsAction.NAME, request), is(true)); + assertThat(role.cluster().check(GetCalendarEventsAction.NAME, request), is(true)); + assertThat(role.cluster().check(GetCalendarsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetCategoriesAction.NAME, request), is(true)); assertThat(role.cluster().check(GetDatafeedsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetDatafeedsStatsAction.NAME, request), is(true)); @@ -835,19 +873,26 @@ public void testMachineLearningUserRole() { assertThat(role.cluster().check(GetJobsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetJobsStatsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetModelSnapshotsAction.NAME, request), is(true)); + assertThat(role.cluster().check(GetOverallBucketsAction.NAME, request), is(true)); assertThat(role.cluster().check(GetRecordsAction.NAME, request), is(true)); assertThat(role.cluster().check(IsolateDatafeedAction.NAME, request), is(false)); assertThat(role.cluster().check(KillProcessAction.NAME, request), is(false)); + assertThat(role.cluster().check(MlInfoAction.NAME, request), is(true)); assertThat(role.cluster().check(OpenJobAction.NAME, request), is(false)); + assertThat(role.cluster().check(PersistJobAction.NAME, request), is(false)); + assertThat(role.cluster().check(PostCalendarEventsAction.NAME, request), is(false)); assertThat(role.cluster().check(PostDataAction.NAME, request), is(false)); assertThat(role.cluster().check(PreviewDatafeedAction.NAME, request), is(false)); + assertThat(role.cluster().check(PutCalendarAction.NAME, request), is(false)); assertThat(role.cluster().check(PutDatafeedAction.NAME, request), is(false)); assertThat(role.cluster().check(PutFilterAction.NAME, request), is(false)); assertThat(role.cluster().check(PutJobAction.NAME, request), is(false)); assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request), is(false)); assertThat(role.cluster().check(StartDatafeedAction.NAME, request), is(false)); assertThat(role.cluster().check(StopDatafeedAction.NAME, request), is(false)); + assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request), is(false)); assertThat(role.cluster().check(UpdateDatafeedAction.NAME, request), is(false)); + assertThat(role.cluster().check(UpdateFilterAction.NAME, request), is(false)); assertThat(role.cluster().check(UpdateJobAction.NAME, request), is(false)); assertThat(role.cluster().check(UpdateModelSnapshotAction.NAME, request), is(false)); assertThat(role.cluster().check(UpdateProcessAction.NAME, request), is(false)); @@ -856,10 +901,12 @@ public void testMachineLearningUserRole() { 
assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); assertNoAccessAllowed(role, "foo"); + assertNoAccessAllowed(role, AnomalyDetectorsIndexFields.CONFIG_INDEX); assertNoAccessAllowed(role, MlMetaIndex.INDEX_NAME); assertNoAccessAllowed(role, AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX); assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT); assertOnlyReadAllowed(role, AuditorField.NOTIFICATIONS_INDEX); + assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } @@ -923,6 +970,16 @@ public void testWatcherUserRole() { assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); } + private void assertReadWriteDocsButNotDeleteIndexAllowed(Role role, String index) { + assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); + assertThat(role.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(UpdateAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(true)); + } + private void assertOnlyReadAllowed(Role role, String index) { assertThat(role.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(false)); From 9e7fd8caed6bd67e0268f7cc71736f7dbaa0cf5c Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 25 Jan 2019 11:17:09 +0100 Subject: [PATCH 33/64] Migrate ZenDiscoveryIT to Zen2 (#37465) ZenDiscoveryIT contained five tests: three run without changes, testNodeRejectsClusterStateWithWrongMasterNode was removed, and testHandleNodeJoin_incompatibleClusterState was adapted to Zen2. To make that adaptation possible, the join-validation step in Coordinator is extracted into a package-private sendValidateJoinRequest method that the test can drive directly.
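The adapted test bridges the callback-style JoinHelper.JoinCallback into a CompletableFuture so it can wait for the expected failure with a timeout instead of polling an AtomicReference. A reusable sketch of that bridge, assuming it lives alongside the test in the same package (the helper name is illustrative, not part of the patch):

    import java.util.concurrent.CompletableFuture;

    // Completes the future with the exception passed to onFailure, and fails
    // the test if the join unexpectedly succeeds.
    static JoinHelper.JoinCallback failureCapturingCallback(CompletableFuture<Exception> future) {
        return new JoinHelper.JoinCallback() {
            @Override
            public void onSuccess() {
                future.completeExceptionally(new AssertionError("onSuccess should not be called"));
            }

            @Override
            public void onFailure(Exception e) {
                future.complete(e);
            }
        };
    }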
--- .../cluster/coordination/Coordinator.java | 44 +++++--- .../coordination}/ZenDiscoveryIT.java | 104 ++++-------------- 2 files changed, 50 insertions(+), 98 deletions(-) rename server/src/test/java/org/elasticsearch/{discovery/zen => cluster/coordination}/ZenDiscoveryIT.java (70%) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index a4e1d3ed8c990..aabe5466d69a9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -397,6 +397,7 @@ private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) { } } + private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) { assert Thread.holdsLock(mutex) == false; assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible"; @@ -413,30 +414,37 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback JoinTaskExecutor.ensureMajorVersionBarrier(joinRequest.getSourceNode().getVersion(), stateForJoinValidation.getNodes().getMinNodeVersion()); } + sendValidateJoinRequest(stateForJoinValidation, joinRequest, joinCallback); - // validate the join on the joining node, will throw a failure if it fails the validation - joinHelper.sendValidateJoinRequest(joinRequest.getSourceNode(), stateForJoinValidation, new ActionListener() { - @Override - public void onResponse(Empty empty) { - try { - processJoinRequest(joinRequest, joinCallback); - } catch (Exception e) { - joinCallback.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", - joinRequest.getSourceNode()), e); - joinCallback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); - } - }); } else { processJoinRequest(joinRequest, joinCallback); } } + // package private for tests + void sendValidateJoinRequest(ClusterState stateForJoinValidation, JoinRequest joinRequest, + JoinHelper.JoinCallback joinCallback) { + // validate the join on the joining node, will throw a failure if it fails the validation + joinHelper.sendValidateJoinRequest(joinRequest.getSourceNode(), stateForJoinValidation, new ActionListener() { + @Override + public void onResponse(Empty empty) { + try { + processJoinRequest(joinRequest, joinCallback); + } catch (Exception e) { + joinCallback.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", + joinRequest.getSourceNode()), e); + joinCallback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); + } + }); + } + + private void processJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) { final Optional optionalJoin = joinRequest.getOptionalJoin(); synchronized (mutex) { diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java similarity index 70% rename from server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java rename to server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index cda7065612714..d737ef790b5bd 100644 --- 
a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -17,9 +17,8 @@ * under the License. */ -package org.elasticsearch.discovery.zen; +package org.elasticsearch.cluster.coordination; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -27,41 +26,32 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryStats; +import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.TestCustomMetaData; -import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.EmptyTransportResponseHandler; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.RemoteTransportException; import java.io.IOException; -import java.net.UnknownHostException; -import java.util.ArrayList; import java.util.EnumSet; -import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -73,13 +63,6 @@ @TestLogging("_root:DEBUG") public class ZenDiscoveryIT extends ESIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // Zen1-specific stuff in some tests - .build(); - } - public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception { Settings defaultSettings = Settings.builder() .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") @@ -122,7 +105,7 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); } - public void testNodeFailuresAreProcessedOnce() throws ExecutionException, InterruptedException, IOException { + public void 
testNodeFailuresAreProcessedOnce() throws IOException { Settings defaultSettings = Settings.builder() .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") @@ -161,78 +144,39 @@ public void testNodeFailuresAreProcessedOnce() throws ExecutionException, Interr assertThat(numUpdates.get(), either(equalTo(1)).or(equalTo(2))); // due to batching, both nodes can be handled in same CS update } - public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception { - List nodeNames = internalCluster().startNodes(2); - - List nonMasterNodes = new ArrayList<>(nodeNames); - nonMasterNodes.remove(internalCluster().getMasterName()); - String noneMasterNode = nonMasterNodes.get(0); - - ClusterState state = internalCluster().getInstance(ClusterService.class).state(); - DiscoveryNode node = null; - for (DiscoveryNode discoveryNode : state.nodes()) { - if (discoveryNode.getName().equals(noneMasterNode)) { - node = discoveryNode; - } - } - assert node != null; - - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes()) - .add(new DiscoveryNode("abc", buildNewFakeTransportAddress(), emptyMap(), - emptySet(), Version.CURRENT)).masterNodeId("abc"); - ClusterState.Builder builder = ClusterState.builder(state); - builder.nodes(nodes); - BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(builder.build(), node.getVersion()); - - final CountDownLatch latch = new CountDownLatch(1); - final AtomicReference reference = new AtomicReference<>(); - internalCluster().getInstance(TransportService.class, noneMasterNode).sendRequest(node, PublishClusterStateAction.SEND_ACTION_NAME, - new BytesTransportRequest(bytes, Version.CURRENT), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - - @Override - public void handleResponse(TransportResponse.Empty response) { - super.handleResponse(response); - latch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - super.handleException(exp); - reference.set(exp); - latch.countDown(); - } - }); - latch.await(); - assertThat(reference.get(), notNullValue()); - assertThat(ExceptionsHelper.detailedMessage(reference.get()), - containsString("cluster state from a different master than the current one, rejecting")); - } - - public void testHandleNodeJoin_incompatibleClusterState() throws UnknownHostException { - String masterOnlyNode = internalCluster().startMasterOnlyNode(); + public void testHandleNodeJoin_incompatibleClusterState() + throws InterruptedException, ExecutionException, TimeoutException { + String masterNode = internalCluster().startMasterOnlyNode(); String node1 = internalCluster().startNode(); - ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, masterOnlyNode); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1); + Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, masterNode); final ClusterState state = clusterService.state(); MetaData.Builder mdBuilder = MetaData.builder(state.metaData()); mdBuilder.putCustom(CustomMetaData.TYPE, new CustomMetaData("data")); ClusterState stateWithCustomMetaData = ClusterState.builder(state).metaData(mdBuilder).build(); - final AtomicReference holder = new AtomicReference<>(); + final CompletableFuture future = new CompletableFuture<>(); DiscoveryNode node = state.nodes().getLocalNode(); - zenDiscovery.handleJoinRequest(node, stateWithCustomMetaData, new 
MembershipAction.JoinCallback() { + + coordinator.sendValidateJoinRequest(stateWithCustomMetaData, new JoinRequest(node, Optional.empty()), + new JoinHelper.JoinCallback() { @Override public void onSuccess() { + future.completeExceptionally(new AssertionError("onSuccess should not be called")); } @Override public void onFailure(Exception e) { - holder.set((IllegalStateException) e); + future.complete(e); } }); - assertThat(holder.get(), notNullValue()); - assertThat(holder.get().getMessage(), equalTo("failure when sending a validation request to node")); + Throwable t = future.get(10, TimeUnit.SECONDS); + + assertTrue(t instanceof IllegalStateException); + assertTrue(t.getCause() instanceof RemoteTransportException); + assertTrue(t.getCause().getCause() instanceof IllegalArgumentException); + assertThat(t.getCause().getCause().getMessage(), containsString("Unknown NamedWriteable")); } public static class CustomMetaData extends TestCustomMetaData { From a3baa8f5effa7c8466151b72758f1e574896a61a Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 25 Jan 2019 11:27:27 +0100 Subject: [PATCH 34/64] Freezing an index should increase its index settings version (#37813) When an index is frozen, two index settings are updated (index.frozen and index.search.throttled) but the settings version is left unchanged and does not reflect the settings update. This commit changes the TransportFreezeIndexAction so that it also increases the settings version when an index is frozen/unfrozen. This issue was caught while working on the replication of closed indices (#3388), in which index metadata for a closed index is updated to frozen metadata and this specific assertion tripped. --- .../core/action/TransportFreezeIndexAction.java | 1 + .../index/engine/FrozenIndexTests.java | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java index 3031ec5b2a409..1efe5389d81b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java @@ -183,6 +183,7 @@ public ClusterState execute(ClusterState currentState) { throw new IllegalStateException("index [" + index.getName() + "] is not closed"); } final IndexMetaData.Builder imdBuilder = IndexMetaData.builder(meta); + imdBuilder.settingsVersion(meta.getSettingsVersion() + 1); final Settings.Builder settingsBuilder = Settings.builder() .put(currentState.metaData().index(index).getSettings()) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 884dafdcd395b..c0493b6efd1fe 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -45,6 +45,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; public class FrozenIndexTests extends ESSingleNodeTestCase { @@ -324,4 +325,19 @@ public void 
testUnfreezeClosedIndex() throws ExecutionException, InterruptedExce assertEquals(IndexMetaData.State.OPEN, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); } + + public void testFreezeIndexIncreasesIndexSettingsVersion() throws ExecutionException, InterruptedException { + final String index = "test"; + createIndex(index, Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()); + client().prepareIndex(index, "_doc").setSource("field", "value").execute().actionGet(); + + final long settingsVersion = client().admin().cluster().prepareState().get() + .getState().metaData().index(index).getSettingsVersion(); + + XPackClient xPackClient = new XPackClient(client()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest(index))); + assertIndexFrozen(index); + assertThat(client().admin().cluster().prepareState().get().getState().metaData().index(index).getSettingsVersion(), + equalTo(settingsVersion + 1)); + } } From be6bdab346fa5058361ed18cbd3cc29ff9052b4f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Jan 2019 12:56:40 +0100 Subject: [PATCH 35/64] Use TestFixturesPlugin to Run Minio in Tests (#37852) * Use TestFixturesPlugin to Run Minio in Tests * Closes #37680 * Closes #37783 --- plugins/repository-s3/build.gradle | 200 +++-------------------- plugins/repository-s3/docker-compose.yml | 9 + 2 files changed, 29 insertions(+), 180 deletions(-) create mode 100644 plugins/repository-s3/docker-compose.yml diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 541e6dc42f553..49c60d2edd730 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,13 +1,10 @@ -import org.apache.tools.ant.taskdefs.condition.Os -import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.RestIntegTestTask import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import java.lang.reflect.Field - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -139,25 +136,6 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } - -final String minioVersion = 'RELEASE.2018-06-22T23-48-46Z' -final String minioBinDir = "${buildDir}/minio/bin" -final String minioDataDir = "${buildDir}/minio/data" -final String minioAddress = "127.0.0.1" - -String minioDistribution -String minioCheckSum -if (Os.isFamily(Os.FAMILY_MAC)) { - minioDistribution = 'darwin-amd64' - minioCheckSum = '96b0bcb2f590e8e65fb83d5c3e221f9bd1106b49fa6f22c6b726b80b845d7c60' -} else if (Os.isFamily(Os.FAMILY_UNIX)) { - minioDistribution = 'linux-amd64' - minioCheckSum = '713dac7c105285eab3b92649be92b5e793b29d3525c7929fa7aaed99374fad99' -} else { - minioDistribution = null - minioCheckSum = null -} - buildscript { repositories { maven { @@ -169,177 +147,39 @@ buildscript { } } -private static int freePort(String minioAddress) { - int minioPort - ServerSocket serverSocket = new ServerSocket(0, 1, InetAddress.getByName(minioAddress)) - try { - minioPort = serverSocket.localPort - } finally { - serverSocket.close() - } - if (minioPort == 0) { - throw new GradleException("Could not find a free port for Minio") - } - return minioPort -} - -private int getMinioPid(Process minioProcess) { - int minioPid - if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { - try { - Class cProcessImpl = minioProcess.getClass() - Field fPid = cProcessImpl.getDeclaredField("pid") - if (!fPid.isAccessible()) { - fPid.setAccessible(true) - } - minioPid = fPid.getInt(minioProcess) - } catch (Exception e) { - logger.error("failed to read pid from minio process", e) - minioProcess.destroyForcibly() - throw e - } - } else { - minioPid = minioProcess.pid() - } - return minioPid -} - -private static Process setupMinio(String minioAddress, int minioPort, String minioDataDir, String accessKey, String secretKey, - String minioBinDir, String minioFileName) { - // we skip these tests on Windows so we do no need to worry about compatibility here - final ProcessBuilder minio = new ProcessBuilder( - "${minioBinDir}/${minioFileName}", - "server", - "--address", - minioAddress + ":" + minioPort, - minioDataDir) - minio.environment().put('MINIO_ACCESS_KEY', accessKey) - minio.environment().put('MINIO_SECRET_KEY', secretKey) - return minio.start() -} - -private void addShutdownHook(Process minioProcess, int minioPort, int minioPid) { - new BufferedReader(new InputStreamReader(minioProcess.inputStream)).withReader { br -> - String line - int httpPort = 0 - while ((line = br.readLine()) != null) { - logger.info(line) - if (line.matches('.*Endpoint.*:\\d+$')) { - assert httpPort == 0 - final int index = line.lastIndexOf(":") - assert index >= 0 - httpPort = Integer.parseInt(line.substring(index + 1)) - if (httpPort != minioPort) { - throw new IllegalStateException("Port mismatch, expected ${minioPort} but was ${httpPort}") - } - - final File script = new File(project.buildDir, "minio/minio.killer.sh") - script.setText( - ["function shutdown {", - " kill ${minioPid}", - "}", - "trap shutdown EXIT", - // will wait indefinitely for input, but we never pass input, and the pipe is only closed when the build dies - "read line\n"].join('\n'), 'UTF-8') - final ProcessBuilder killer = new ProcessBuilder("bash", script.absolutePath) - killer.start() - break - } - } - - if (httpPort <= 0) { - throw new IllegalStateException("httpPort must be > 0") - } - } -} - -if (useFixture && 
minioDistribution) { - apply plugin: 'de.undercouch.download' - - final String minioFileName = "minio.${minioVersion}" - final String minioDownloadURL = "https://dl.minio.io/server/minio/release/${minioDistribution}/archive/${minioFileName}" - final String minioFilePath = "${gradle.gradleUserHomeDir}/downloads/minio/${minioDistribution}/${minioFileName}" +if (useFixture) { - task downloadMinio(type: Download) { - src minioDownloadURL - dest minioFilePath - onlyIfModified true - } + apply plugin: 'elasticsearch.test.fixtures' - task verifyMinioChecksum(type: Verify, dependsOn: downloadMinio) { - src minioFilePath - algorithm 'SHA-256' - checksum minioCheckSum - } - - task installMinio(type: Sync, dependsOn: verifyMinioChecksum) { - from minioFilePath - into minioBinDir - fileMode 0755 + RestIntegTestTask integTestMinio = project.tasks.create('integTestMinio', RestIntegTestTask.class) { + description = "Runs REST tests using the Minio repository." } - task startMinio { - dependsOn installMinio - - ext.minioPid = 0L - ext.minioPort = 0 - + Task writeDockerFile = project.tasks.create('writeDockerFile') { + File minioDockerfile = new File("${project.buildDir}/minio-docker/Dockerfile") + outputs.file(minioDockerfile) doLast { - new File("${minioDataDir}/${s3PermanentBucket}").mkdirs() - - Exception accumulatedException = null - for (int i = 0; i < 5; ++i) { - try { - minioPort = freePort(minioAddress) - final Process process = - setupMinio(minioAddress, minioPort, minioDataDir, s3PermanentAccessKey, s3PermanentSecretKey, minioBinDir, minioFileName) - minioPid = getMinioPid(process) - addShutdownHook(process, minioPort, minioPid) - break - } catch (Exception e) { - logger.error("Exception while trying to start Minio {}", e) - if (accumulatedException == null) { - accumulated = e - } else { - accumulatedException.addSuppressed(e) - } - } - } - if (accumulatedException != null) { - throw new GradleException("Failed to start Minio", accumulatedException) - } + minioDockerfile.parentFile.mkdirs() + minioDockerfile.text = "FROM minio/minio:RELEASE.2019-01-23T23-18-58Z\n" + + "RUN mkdir -p /minio/data/${s3PermanentBucket}\n" + + "ENV MINIO_ACCESS_KEY ${s3PermanentAccessKey}\n" + + "ENV MINIO_SECRET_KEY ${s3PermanentSecretKey}" } } - task stopMinio(type: LoggedExec) { - onlyIf { startMinio.minioPid > 0 } - - doFirst { - logger.info("Shutting down minio with pid ${startMinio.minioPid}") - } - - final Object pid = "${ -> startMinio.minioPid }" - - // we skip these tests on Windows so we do no need to worry about compatibility here - executable = 'kill' - args('-9', pid) - } - - RestIntegTestTask integTestMinio = project.tasks.create('integTestMinio', RestIntegTestTask.class) { - description = "Runs REST tests using the Minio repository." 
- } - + preProcessFixture.dependsOn(writeDockerFile) // The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: project.afterEvaluate { ClusterConfiguration cluster = project.extensions.getByName('integTestMinioCluster') as ClusterConfiguration cluster.dependsOn(project.bundlePlugin) - cluster.dependsOn(startMinio) // otherwise we don't know the Minio port + cluster.dependsOn(postProcessFixture) cluster.keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey cluster.keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey Closure minioAddressAndPort = { - assert startMinio.minioPort > 0 - return 'http://' + minioAddress + ':' + startMinio.minioPort + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort } cluster.setting 's3.client.integration_test_permanent.endpoint', "${ -> minioAddressAndPort.call()}" @@ -354,8 +194,7 @@ if (useFixture && minioDistribution) { restIntegTestTask.clusterConfig.jvmArgs = jvmArgs } - integTestMinioRunner.dependsOn(startMinio) - integTestMinioRunner.finalizedBy(stopMinio) + integTestMinioRunner.dependsOn(postProcessFixture) // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 integTestMinioRunner.systemProperty 'tests.rest.blacklist', [ 'repository_s3/30_repository_temporary_credentials/*', @@ -364,6 +203,7 @@ if (useFixture && minioDistribution) { ].join(",") project.check.dependsOn(integTestMinio) + BuildPlugin.requireDocker(integTestMinio) } File parentFixtures = new File(project.buildDir, "fixtures") diff --git a/plugins/repository-s3/docker-compose.yml b/plugins/repository-s3/docker-compose.yml new file mode 100644 index 0000000000000..e44750550e271 --- /dev/null +++ b/plugins/repository-s3/docker-compose.yml @@ -0,0 +1,9 @@ +version: '3' +services: + minio-fixture: + build: + context: ./build/minio-docker + dockerfile: Dockerfile + ports: + - "9000" + command: ["server", "/minio/data"] \ No newline at end of file From 97e89ad0bc6f1d4bb13571fdca05280d2369e93f Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 25 Jan 2019 14:13:34 +0200 Subject: [PATCH 36/64] Testclusters logging improvements (#37354) - Cluster logs are only indented, as the node name is already in the logs - Silence logging on shutdown - Use the fully qualified name as the node and cluster name --- .../elasticsearch/gradle/testclusters/ElasticsearchNode.java | 2 +- .../elasticsearch/gradle/testclusters/TestClustersPlugin.java | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 56351f0c53462..cb7986b9a3051 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -288,7 +288,7 @@ private void logFileContents(String description, Path from) { logger.error("{} `{}`", description, this); try(Stream lines = Files.lines(from, StandardCharsets.UTF_8)) { lines - .map(line -> " [" + name + "]" + line) + .map(line -> " " + line) .forEach(logger::error); } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java 
b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 8926f74ca39a7..3abc9a6a6177e 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -332,7 +332,9 @@ private static void shutdownExecutorService() { } private static void shutDownAllClusters() { - logger.info("Shutting down all test clusters", new RuntimeException()); + if (logger.isDebugEnabled()) { + logger.debug("Shutting down all test clusters", new RuntimeException()); + } synchronized (runningClusters) { runningClusters.forEach(each -> each.stop(true)); runningClusters.clear(); From 70af3c79831512f3f4edfb0bf06c733a09a82eb7 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Fri, 25 Jan 2019 07:13:46 -0500 Subject: [PATCH 37/64] Correct deprec log in RestGetFieldMappingAction (#37843) * Correct deprec log in RestGetFieldMappingAction Correct a class used for deprecation logging in RestGetFieldMappingAction * Correct deprec log in RestCreateIndexAction Correct a class used for deprecation logging in RestCreateIndexAction --- .../rest/action/admin/indices/RestCreateIndexAction.java | 2 +- .../rest/action/admin/indices/RestGetFieldMappingAction.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 61e8af47a43d5..adcdc0b281c2e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -40,7 +40,7 @@ public class RestCreateIndexAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger( - LogManager.getLogger(RestPutMappingAction.class)); + LogManager.getLogger(RestCreateIndexAction.class)); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in create " + "index requests is deprecated. The parameter will be removed in the next major version."; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java index a7065622a86e5..09a59d44f9737 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -47,7 +47,7 @@ public class RestGetFieldMappingAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger( - LogManager.getLogger(RestPutMappingAction.class)); + LogManager.getLogger(RestGetFieldMappingAction.class)); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in get " + "field mapping requests is deprecated. 
The parameter will be removed in the next major version."; From 7b516f99b975eaf8b8d52d179557e0b347ada00b Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 25 Jan 2019 14:14:12 +0200 Subject: [PATCH 38/64] Resolve BWC dependencies too when resolving deps (#37412) This will allow those to be cached too, leading to faster builds and fewer opportunities for network failures --- distribution/bwc/build.gradle | 110 +++++++++++++++++++--------- 1 file changed, 61 insertions(+), 49 deletions(-) diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index a0acce1ef64eb..4557e0eb1dc4e 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -134,70 +134,82 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre projectDirs.add("${baseDir}/${projectName}") artifactFiles.put(projectName, file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT.${projectName}")) } + + Closure createRunBwcGradleTask = { name, extraConfig -> + task "$name"(type: Exec) { + dependsOn checkoutBwcBranch, writeBuildMetadata + workingDir = checkoutDir + doFirst { + // Execution time so that the checkouts are available + List lines = file("${checkoutDir}/.ci/java-versions.properties").readLines() + environment( + 'JAVA_HOME', + getJavaHome(it, Integer.parseInt( + lines + .findAll({ it.startsWith("ES_BUILD_JAVA=java") }) + .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) + .join("!!") + )) + ) + environment( + 'RUNTIME_JAVA_HOME', + getJavaHome(it, Integer.parseInt( + lines + .findAll({ it.startsWith("ES_RUNTIME_JAVA=java") }) + .collect({ it.replace("ES_RUNTIME_JAVA=java", "").trim() }) + .join("!!") + )) + ) + } if (Os.isFamily(Os.FAMILY_WINDOWS)) { + executable 'cmd' + args '/C', 'call', new File(checkoutDir, 'gradlew').toString() + } else { + executable new File(checkoutDir, 'gradlew').toString() + } + if (gradle.startParameter.isOffline()) { + args "--offline" + } + args "-Dbuild.snapshot=true" + final LogLevel logLevel = gradle.startParameter.logLevel + if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { + args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + } + final String showStacktraceName = gradle.startParameter.showStacktrace.name() + assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) + if (showStacktraceName.equals("ALWAYS")) { + args "--stacktrace" + } else if (showStacktraceName.equals("ALWAYS_FULL")) { + args "--full-stacktrace" + } + standardOutput = new 
IndentingOutputStream(System.out, bwcVersion) + errorOutput = new IndentingOutputStream(System.err, bwcVersion) + configure extraConfig } + } + + createRunBwcGradleTask("buildBwcVersion") { for (String dir : projectDirs) { args ":${dir.replace('/', ':')}:assemble" } - args "-Dbuild.snapshot=true" - final LogLevel logLevel = gradle.startParameter.logLevel - if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { - args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" - } - final String showStacktraceName = gradle.startParameter.showStacktrace.name() - assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) - if (showStacktraceName.equals("ALWAYS")) { - args "--stacktrace" - } else if (showStacktraceName.equals("ALWAYS_FULL")) { - args "--full-stacktrace" - } - standardOutput = new IndentingOutputStream(System.out, bwcVersion) - errorOutput = new IndentingOutputStream(System.err, bwcVersion) doLast { List missing = artifactFiles.values().grep { file -> false == file.exists() } if (false == missing.empty) { - throw new InvalidUserDataException( - "Building ${bwcVersion} didn't generate expected files ${missing}") + throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected files ${missing}") } } } + createRunBwcGradleTask("resolveAllBwcDependencies") { + args 'resolveAllDependencies' + } + + resolveAllDependencies.dependsOn resolveAllBwcDependencies + for (e in artifactFiles) { String projectName = e.key File artifactFile = e.value From e7f0adb0c5ab49ba64c177cf73e4fa4a9b13b91b Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 25 Jan 2019 14:15:09 +0200 Subject: [PATCH 39/64] Add path sensitivity annotations (#37762) The plugin builder plugin generates warnings for these. There's no immediate impact as we don't have a shared build cache. 
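As a rough sketch of what these annotations do, a hypothetical Gradle task (illustrative only, not part of this patch) could declare its inputs like the following, so that build cache keys depend on input file names rather than absolute paths:

[source,java]
--------------------------------------------------
import org.gradle.api.DefaultTask;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.PathSensitive;
import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.TaskAction;

// Hypothetical task for illustration only.
public class ExampleScanTask extends DefaultTask {

    private FileCollection jars;

    // NAME_ONLY: only the file names feed the up-to-date and cache checks,
    // so moving the checkout to a different directory does not invalidate
    // the task.
    @InputFiles
    @PathSensitive(PathSensitivity.NAME_ONLY)
    public FileCollection getJars() {
        return jars;
    }

    public void setJars(FileCollection jars) {
        this.jars = jars;
    }

    @TaskAction
    public void scan() {
        jars.forEach(jar -> getLogger().lifecycle("scanning {}", jar.getName()));
    }
}
--------------------------------------------------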
--- .../elasticsearch/gradle/precommit/ThirdPartyAuditTask.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index 13cfb8ea61701..8ec979420c0e4 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -33,6 +33,8 @@ import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.TaskAction; import org.gradle.process.ExecResult; @@ -86,11 +88,13 @@ public void setTargetCompatibility(JavaVersion targetCompatibility) { } @InputFiles + @PathSensitive(PathSensitivity.NAME_ONLY) public Configuration getForbiddenAPIsConfiguration() { return getProject().getConfigurations().getByName("forbiddenApisCliJar"); } @InputFile + @PathSensitive(PathSensitivity.NONE) public File getSignatureFile() { return signatureFile; } @@ -154,6 +158,7 @@ public Set getMissingClassExcludes() { } @InputFiles + @PathSensitive(PathSensitivity.NAME_ONLY) @SkipWhenEmpty public Set getJarsToScan() { // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, From 787acb14b91b7065c4ed80167fb4bdb6cc444c86 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 25 Jan 2019 13:45:39 +0100 Subject: [PATCH 40/64] Track total hits up to 10,000 by default (#37466) This commit changes the default for the `track_total_hits` option of the search request to `10,000`. This means that by default search requests will accurately track the total hit count up to `10,000` documents; requests that match more than this value will set the `"total.relation"` to `"gte"` (i.e. greater than or equal to) and the `"total.value"` to `10,000` in the search response. Scroll queries are not impacted; they will continue to count the total hits accurately. The default is set back to `true` (accurate hit count) if `rest_total_hits_as_int` is set in the search request. I chose `10,000` as the default because that's also the number we use to limit pagination. This means that users will be able to know how far they can jump (up to 10,000) even if the total number of hits is not accurate. 
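For illustration, a client that needs the old exact behaviour can opt back in. A minimal sketch using the builder methods touched by this change (the index and field names are made up):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

class TrackTotalHitsSketch {

    // Force an exact total despite the new 10,000 default.
    static SearchResponse exactCount(Client client) {
        return client.prepareSearch("twitter")
            .setQuery(QueryBuilders.matchQuery("message", "elasticsearch"))
            .setTrackTotalHits(true)
            .get();
    }

    // Keep a bounded count but raise the threshold to 100,000.
    static SearchSourceBuilder boundedCount() {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchQuery("message", "elasticsearch"));
        source.trackTotalHitsUpTo(100_000);
        return source;
    }
}
--------------------------------------------------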
Closes #33028 --- docs/reference/getting-started.asciidoc | 6 +- .../index-modules/index-sorting.asciidoc | 3 +- .../migration/migrate_7_0/search.asciidoc | 31 +++++ .../query-dsl/rank-feature-query.asciidoc | 2 +- .../search/request/track-total-hits.asciidoc | 119 +++++++++--------- docs/reference/search/uri-request.asciidoc | 2 +- .../search/AbstractSearchAsyncAction.java | 8 +- .../action/search/SearchPhaseController.java | 11 +- .../action/search/SearchRequest.java | 6 +- .../common/io/stream/StreamInput.java | 10 ++ .../common/io/stream/StreamOutput.java | 12 ++ .../rest/action/search/RestCountAction.java | 2 +- .../rest/action/search/RestSearchAction.java | 27 ++-- .../elasticsearch/search/SearchService.java | 8 +- .../search/builder/SearchSourceBuilder.java | 22 ++-- .../search/internal/SearchContext.java | 2 +- .../query/EarlyTerminatingCollector.java | 10 ++ .../search/query/TopDocsCollectorContext.java | 9 +- .../search/profile/query/QueryProfilerIT.java | 2 + .../search/RandomSearchRequestGenerator.java | 2 +- .../BatchedDocumentsIteratorTests.java | 3 +- .../authc/esnative/NativeUsersStore.java | 2 + .../authz/store/NativeRolesStore.java | 3 + .../xpack/sql/execution/search/Querier.java | 9 +- 24 files changed, 215 insertions(+), 96 deletions(-) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index d32eeaff8c719..d8656f7ac4c25 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -793,7 +793,11 @@ As for the response, we see the following parts: * `hits._score` and `max_score` - ignore these fields for now The accuracy of `hits.total` is controlled by the request parameter `track_total_hits`, when set to true -the request will track the total hits accurately (`"relation": "eq"`). +the request will track the total hits accurately (`"relation": "eq"`). It defaults to `10,000` +which means that the total hit count is accurately tracked up to `10,000` documents. +You can force an accurate count by setting `track_total_hits` to true explicitly. +See the <> documentation +for more details. Here is the same exact search above using the alternative request body method: diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc index b4648dd256d3c..a387deaca6d18 100644 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -195,7 +195,8 @@ as soon as N documents have been collected per segment. <1> The total number of hits matching the query is unknown because of early termination. -NOTE: Aggregations will collect all documents that match the query regardless of the value of `track_total_hits` +NOTE: Aggregations will collect all documents that match the query regardless +of the value of `track_total_hits` [[index-modules-index-sorting-conjunctions]] === Use index sorting to speed up conjunctions diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 61cbee851304c..67adf9363406c 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -205,3 +205,34 @@ If `track_total_hits` is set to `false` in the search request the search respons will set `hits.total` to null and the object will not be displayed in the rest layer. 
You can add `rest_total_hits_as_int=true` in the search request parameters to get the old format back (`"total": -1`). + +[float] +==== `track_total_hits` defaults to 10,000 + +By default search requests will count the total hits accurately up to `10,000` +documents. If the total number of hits that match the query is greater than this + value, the response will indicate that the returned value is a lower bound: + +[source,js] +-------------------------------------------------- +{ + "_shards": ... + "timed_out": false, + "took": 100, + "hits": { + "max_score": 1.0, + "total" : { + "value": 10000, <1> + "relation": "gte" <2> + }, + "hits": ... + } +} +-------------------------------------------------- +// NOTCONSOLE + +<1> There are at least 10000 documents that match the query +<2> This is a lower bound (`"gte"`). + +You can force the count to always be accurate by setting `track_total_hits` +to true explicitly in the search request. \ No newline at end of file diff --git a/docs/reference/query-dsl/rank-feature-query.asciidoc b/docs/reference/query-dsl/rank-feature-query.asciidoc index 277d45f257d02..fe23c5f3ec26f 100644 --- a/docs/reference/query-dsl/rank-feature-query.asciidoc +++ b/docs/reference/query-dsl/rank-feature-query.asciidoc @@ -11,7 +11,7 @@ of the query. Compared to using <> or other ways to modify the score, this query has the benefit of being able to efficiently skip non-competitive hits when -<> is set to `false`. Speedups may be +<> is not set to `true`. Speedups may be spectacular. Here is an example that indexes various features: diff --git a/docs/reference/search/request/track-total-hits.asciidoc b/docs/reference/search/request/track-total-hits.asciidoc index bdad4dbde918e..c416c777366e4 100644 --- a/docs/reference/search/request/track-total-hits.asciidoc +++ b/docs/reference/search/request/track-total-hits.asciidoc @@ -4,9 +4,20 @@ Generally the total hit count can't be computed accurately without visiting all matches, which is costly for queries that match lots of documents. The `track_total_hits` parameter allows you to control how the total number of hits -should be tracked. When set to `true` the search response will always track the -number of hits that match the query accurately (e.g. `total.relation` will always -be equal to `"eq"` when `track_total_hits is set to true). +should be tracked. +Given that it is often enough to have a lower bound of the number of hits, +such as "there are at least 10000 hits", the default is set to `10,000`. +This means that requests will count the total hits accurately up to `10,000`. +It is a good trade off to speed up searches if you don't need the accurate number +of hits after a certain threshold. + +When set to `true` the search response will always track the number of hits that +match the query accurately (e.g. `total.relation` will always be equal to `"eq"` +when `track_total_hits` is set to true). Otherwise the `"total.relation"` returned +in the `"total"` object in the search response determines how the `"total.value"` +should be interpreted. A value of `"gte"` means that the `"total.value"` is a +lower bound of the total hits that match the query and a value of `"eq"` indicates +that `"total.value"` is the accurate count. [source,js] -------------------------------------------------- @@ -50,57 +61,9 @@ GET twitter/_search <1> The total number of hits that match the query. <2> The count is accurate (e.g. `"eq"` means equals). 
-If you don't need to track the total number of hits you can improve query times -by setting this option to `false`. In such case the search can efficiently skip -non-competitive hits because it doesn't need to count all matches: - -[source,js] --------------------------------------------------- -GET twitter/_search -{ - "track_total_hits": false, - "query": { - "match" : { - "message" : "Elasticsearch" - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -\... returns: - -[source,js] --------------------------------------------------- -{ - "_shards": ... - "timed_out": false, - "took": 10, - "hits" : { <1> - "max_score": 1.0, - "hits": ... - } -} --------------------------------------------------- -// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] -// TESTRESPONSE[s/"took": 10/"took": $body.took/] -// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] -// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] - -<1> The total number of hits is unknown. - -Given that it is often enough to have a lower bound of the number of hits, -such as "there are at least 1000 hits", it is also possible to set -`track_total_hits` as an integer that represents the number of hits to count -accurately. The search can efficiently skip non-competitive document as soon -as collecting at least $`track_total_hits` documents. This is a good trade -off to speed up searches if you don't need the accurate number of hits after -a certain threshold. - - -For instance the following query will track the total hit count that match -the query accurately up to 100 documents: +It is also possible to set `track_total_hits` to an integer. +For instance the following query will accurately track the total hits that match +the query, up to 100 documents: [source,js] -------------------------------------------------- GET twitter/_search { "track_total_hits": 100, "query": { "match" : { "message" : "Elasticsearch" } } } -------------------------------------------------- // CONSOLE // TEST[continued] The `hits.total.relation` in the response will indicate if the -value returned in `hits.total.value` is accurate (`eq`) or a lower -bound of the total (`gte`). +value returned in `hits.total.value` is accurate (`"eq"`) or a lower +bound of the total (`"gte"`). For instance the following response: @@ -173,4 +136,46 @@ will indicate that the returned value is a lower bound: // TEST[skip:response is already tested in the previous snippet] <1> There are at least 100 documents that match the query -<2> This is a lower bound (`gte`). \ No newline at end of file +<2> This is a lower bound (`"gte"`). + +If you don't need to track the total number of hits at all you can improve query +times by setting this option to `false`: + +[source,js] +-------------------------------------------------- +GET twitter/_search +{ + "track_total_hits": false, + "query": { + "match" : { + "message" : "Elasticsearch" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +\... returns: + +[source,js] +-------------------------------------------------- +{ + "_shards": ... + "timed_out": false, + "took": 10, + "hits" : { <1> + "max_score": 1.0, + "hits": ... + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] +// TESTRESPONSE[s/"took": 10/"took": $body.took/] +// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] + +<1> The total number of hits is unknown. 
+ +Finally you can force an accurate count by setting `"track_total_hits"` +to `true` in the request. \ No newline at end of file diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index 87e1da907fb7d..7bf769c6d7f47 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -101,7 +101,7 @@ is important). |`track_scores` |When sorting, set to `true` in order to still track scores and return them as part of each hit. -|`track_total_hits` |Defaults to true. Set to `false` in order to disable the tracking +|`track_total_hits` |Defaults to `10,000`. Set to `false` in order to disable the tracking of the total number of hits that match the query. It also accepts an integer which in this case represents the number of hits to count accurately. diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index d6abbf73e8864..45bfb099f2b71 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -114,9 +114,11 @@ public final void start() { //no search shards to search on, bail with empty response //(it happens with search across _all with no indices around and consistent with broadcast operations) - boolean withTotalHits = request.source() != null ? - // total hits is null in the response if the tracking of total hits is disabled - request.source().trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_DISABLED : true; + int trackTotalHitsUpTo = request.source() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : + request.source().trackTotalHitsUpTo() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : + request.source().trackTotalHitsUpTo(); + // total hits is null in the response if the tracking of total hits is disabled + boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED; listener.onResponse(new SearchResponse(InternalSearchResponse.empty(withTotalHits), null, 0, 0, 0, buildTookInMillis(), ShardSearchFailure.EMPTY_ARRAY, clusters)); return; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 027d9d5f10c25..67f33398bba68 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -696,6 +696,15 @@ int getNumBuffered() { int getNumReducePhases() { return numReducePhases; } } + private int resolveTrackTotalHits(SearchRequest request) { + if (request.scroll() != null) { + // no matter what the value of track_total_hits is + return SearchContext.TRACK_TOTAL_HITS_ACCURATE; + } + return request.source() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : request.source().trackTotalHitsUpTo() == null ? + SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : request.source().trackTotalHitsUpTo(); + } + /** * Returns a new ArraySearchPhaseResults instance. This might return an instance that reduces search responses incrementally. 
*/ @@ -704,7 +713,7 @@ InitialSearchPhase.ArraySearchPhaseResults newSearchPhaseResu boolean isScrollRequest = request.scroll() != null; final boolean hasAggs = source != null && source.aggregations() != null; final boolean hasTopDocs = source == null || source.size() != 0; - final int trackTotalHitsUpTo = source == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : source.trackTotalHitsUpTo(); + final int trackTotalHitsUpTo = resolveTrackTotalHits(request); final boolean finalReduce = request.getLocalClusterAlias() == null; if (isScrollRequest == false && (hasAggs || hasTopDocs)) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 69b090fb89a5a..020887068f015 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -222,7 +223,10 @@ public void writeTo(StreamOutput out) throws IOException { public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; final Scroll scroll = scroll(); - if (source != null && source.trackTotalHits() == false && scroll != null) { + if (source != null + && source.trackTotalHitsUpTo() != null + && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE + && scroll != null) { validationException = addValidationError("disabling [track_total_hits] is not allowed in a scroll context", validationException); } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 723de8fd5da31..dde71ad68e17f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -204,6 +204,16 @@ public int readInt() throws IOException { | ((readByte() & 0xFF) << 8) | (readByte() & 0xFF); } + /** + * Reads an optional {@link Integer}. + */ + public Integer readOptionalInt() throws IOException { + if (readBoolean()) { + return readInt(); + } + return null; + } + /** * Reads an int stored in variable-length format. Reads between one and * five bytes. Smaller values take fewer bytes. Negative numbers diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index e9709de1a44a3..3031e2f2e7164 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -323,6 +323,18 @@ public void writeOptionalString(@Nullable String str) throws IOException { } } + /** + * Writes an optional {@link Integer}. 
+ */ + public void writeOptionalInt(@Nullable Integer integer) throws IOException { + if (integer == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeInt(integer); + } + } + public void writeOptionalVInt(@Nullable Integer integer) throws IOException { if (integer == null) { writeBoolean(false); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java index 04d13133f0841..ecdd34ca07c88 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestCountAction.java @@ -72,7 +72,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { SearchRequest countRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index"))); countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions())); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).trackTotalHits(true); countRequest.source(searchSourceBuilder); request.withContentOrSourceParamParserOrNull(parser -> { if (parser == null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index da773efed580d..78082dd364173 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -173,6 +173,7 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); + checkRestTotalHits(request, searchRequest); } @@ -240,6 +241,7 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false)); } + if (request.hasParam("track_total_hits")) { if (Booleans.isBoolean(request.param("track_total_hits"))) { searchSourceBuilder.trackTotalHits( @@ -289,17 +291,26 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil } /** - * Throws an {@link IllegalArgumentException} if {@link #TOTAL_HITS_AS_INT_PARAM} - * is used in conjunction with a lower bound value for the track_total_hits option. + * Modify the search request to accurately count the total hits that match the query + * if {@link #TOTAL_HITS_AS_INT_PARAM} is set. + * + * @throws IllegalArgumentException if {@link #TOTAL_HITS_AS_INT_PARAM} + * is used in conjunction with a lower bound value (other than {@link SearchContext#DEFAULT_TRACK_TOTAL_HITS_UP_TO}) + * for the track_total_hits option. */ public static void checkRestTotalHits(RestRequest restRequest, SearchRequest searchRequest) { - int trackTotalHitsUpTo = searchRequest.source() == null ? 
- SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : searchRequest.source().trackTotalHitsUpTo(); - if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_ACCURATE || - trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) { - return ; + boolean totalHitsAsInt = restRequest.paramAsBoolean(TOTAL_HITS_AS_INT_PARAM, false); + if (totalHitsAsInt == false) { + return; + } + if (searchRequest.source() == null) { + searchRequest.source(new SearchSourceBuilder()); } - if (restRequest.paramAsBoolean(TOTAL_HITS_AS_INT_PARAM, false)) { + Integer trackTotalHitsUpTo = searchRequest.source().trackTotalHitsUpTo(); + if (trackTotalHitsUpTo == null) { + searchRequest.source().trackTotalHits(true); + } else if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE + && trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { throw new IllegalArgumentException("[" + TOTAL_HITS_AS_INT_PARAM + "] cannot be used " + "if the tracking of total hits is not accurate, got " + trackTotalHitsUpTo); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 5e2758eb5b83c..ef255c8af7ad1 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -775,10 +775,14 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } } context.trackScores(source.trackScores()); - if (source.trackTotalHits() == false && context.scrollContext() != null) { + if (source.trackTotalHitsUpTo() != null + && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE + && context.scrollContext() != null) { throw new SearchContextException(context, "disabling [track_total_hits] is not allowed in a scroll context"); } - context.trackTotalHitsUpTo(source.trackTotalHitsUpTo()); + if (source.trackTotalHitsUpTo() != null) { + context.trackTotalHitsUpTo(source.trackTotalHitsUpTo()); + } if (source.minScore() != null) { context.minimumScore(source.minScore()); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 81dd84ad8e48b..f5c99fc513759 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -69,7 +69,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; -import static org.elasticsearch.search.internal.SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; @@ -158,7 +157,7 @@ public static HighlightBuilder highlight() { private boolean trackScores = false; - private int trackTotalHitsUpTo = DEFAULT_TRACK_TOTAL_HITS_UP_TO; + private Integer trackTotalHitsUpTo; private SearchAfterBuilder searchAfterBuilder; @@ -261,7 +260,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); collapse = in.readOptionalWriteable(CollapseBuilder::new); if (in.getVersion().onOrAfter(Version.V_7_0_0)) { - trackTotalHitsUpTo = in.readInt(); + trackTotalHitsUpTo = in.readOptionalInt(); } else { trackTotalHitsUpTo = in.readBoolean() ? 
TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED; } @@ -327,9 +326,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(sliceBuilder); out.writeOptionalWriteable(collapse); if (out.getVersion().onOrAfter(Version.V_7_0_0)) { - out.writeInt(trackTotalHitsUpTo); + out.writeOptionalInt(trackTotalHitsUpTo); } else { - out.writeBoolean(trackTotalHitsUpTo > SearchContext.TRACK_TOTAL_HITS_DISABLED); + out.writeBoolean(trackTotalHitsUpTo == null ? true : trackTotalHitsUpTo > SearchContext.TRACK_TOTAL_HITS_DISABLED); } } @@ -568,16 +567,17 @@ public boolean trackScores() { /** * Indicates if the total hit count for the query should be tracked. */ - public boolean trackTotalHits() { - return trackTotalHitsUpTo == TRACK_TOTAL_HITS_ACCURATE; - } - public SearchSourceBuilder trackTotalHits(boolean trackTotalHits) { this.trackTotalHitsUpTo = trackTotalHits ? TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED; return this; } - public int trackTotalHitsUpTo() { + /** + * Returns the total hit count that should be tracked or null if the value is unset. + * Defaults to null. + */ + @Nullable + public Integer trackTotalHitsUpTo() { return trackTotalHitsUpTo; } @@ -1289,7 +1289,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(TRACK_SCORES_FIELD.getPreferredName(), true); } - if (trackTotalHitsUpTo != SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO) { + if (trackTotalHitsUpTo != null) { builder.field(TRACK_TOTAL_HITS_FIELD.getPreferredName(), trackTotalHitsUpTo); } diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index bd6d9c501c8d1..2c2aedfcf7484 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -84,7 +84,7 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public static final int DEFAULT_TERMINATE_AFTER = 0; public static final int TRACK_TOTAL_HITS_ACCURATE = Integer.MAX_VALUE; public static final int TRACK_TOTAL_HITS_DISABLED = -1; - public static final int DEFAULT_TRACK_TOTAL_HITS_UP_TO = TRACK_TOTAL_HITS_ACCURATE; + public static final int DEFAULT_TRACK_TOTAL_HITS_UP_TO = 10000; private Map> clearables = null; private final AtomicBoolean closed = new AtomicBoolean(false); diff --git a/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java b/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java index 8b17437740cdf..2cfcdf1ae669d 100644 --- a/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java +++ b/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java @@ -41,6 +41,7 @@ static final class EarlyTerminationException extends RuntimeException { private final int maxCountHits; private int numCollected; private boolean forceTermination; + private boolean earlyTerminated; /** * Ctr @@ -58,6 +59,7 @@ static final class EarlyTerminationException extends RuntimeException { @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { if (numCollected >= maxCountHits) { + earlyTerminated = true; if (forceTermination) { throw new EarlyTerminationException("early termination [CountBased]"); } else { @@ -68,6 +70,7 @@ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOExcept @Override public void 
collect(int doc) throws IOException { if (++numCollected > maxCountHits) { + earlyTerminated = true; if (forceTermination) { throw new EarlyTerminationException("early termination [CountBased]"); } else { @@ -78,4 +81,11 @@ public void collect(int doc) throws IOException { }; }; } + + /** + * Returns true if this collector has early terminated. + */ + public boolean hasEarlyTerminated() { + return earlyTerminated; + } } diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index 2314d11e7e387..1ccc8f4cb92db 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -112,8 +112,11 @@ private EmptyTopDocsCollectorContext(IndexReader reader, Query query, this.collector = hitCountCollector; this.hitCountSupplier = () -> new TotalHits(hitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO); } else { - this.collector = new EarlyTerminatingCollector(hitCountCollector, trackTotalHitsUpTo, false); - this.hitCountSupplier = () -> new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); + EarlyTerminatingCollector col = + new EarlyTerminatingCollector(hitCountCollector, trackTotalHitsUpTo, false); + this.collector = col; + this.hitCountSupplier = () -> new TotalHits(hitCountCollector.getTotalHits(), + col.hasEarlyTerminated() ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO : TotalHits.Relation.EQUAL_TO); } } else { this.collector = new EarlyTerminatingCollector(hitCountCollector, 0, false); @@ -157,7 +160,7 @@ private CollapsingTopDocsCollectorContext(CollapseContext collapseContext, this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats; this.topDocsCollector = collapseContext.createTopDocs(sort, numHits); - MaxScoreCollector maxScoreCollector = null; + MaxScoreCollector maxScoreCollector; if (trackMaxScore) { maxScoreCollector = new MaxScoreCollector(); maxScoreSupplier = maxScoreCollector::getMaxScore; diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 14686aff209c7..27b1157766999 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -53,6 +53,7 @@ public class QueryProfilerIT extends ESIntegTestCase { * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, * constructs 20-100 random queries and tries to profile them */ + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8658") public void testProfileQuery() throws Exception { createIndex("test"); ensureGreen(); @@ -79,6 +80,7 @@ public void testProfileQuery() throws Exception { SearchResponse resp = client().prepareSearch() .setQuery(q) + .setTrackTotalHits(true) .setProfile(true) .setSearchType(SearchType.QUERY_THEN_FETCH) .get(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index 6ec2732aaf915..58dbe869b5c71 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -165,7 +165,7 @@ public static SearchSourceBuilder randomSearchSourceBuilder( builder.trackTotalHits(randomBoolean()); } else { builder.trackTotalHitsUpTo( - randomIntBetween(SearchContext.TRACK_TOTAL_HITS_DISABLED, SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO) + randomIntBetween(SearchContext.TRACK_TOTAL_HITS_DISABLED, SearchContext.TRACK_TOTAL_HITS_ACCURATE) ); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java index c301a0b369787..0024eb5f8c648 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIteratorTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; import org.junit.Before; @@ -139,7 +140,7 @@ private void assertSearchRequest() { assertThat(searchRequest.scroll().keepAlive(), equalTo(TimeValue.timeValueMinutes(5))); assertThat(searchRequest.types().length, equalTo(0)); assertThat(searchRequest.source().query(), equalTo(QueryBuilders.matchAllQuery())); - assertThat(searchRequest.source().trackTotalHits(), is(true)); + assertThat(searchRequest.source().trackTotalHitsUpTo(), is(SearchContext.TRACK_TOTAL_HITS_ACCURATE)); } private void assertSearchScrollRequests(int expectedCount) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 47c32489ae1f4..3a6fb0ea3b341 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -175,6 +175,7 @@ void getUserCount(final ActionListener listener) { client.prepareSearch(SECURITY_INDEX_NAME) .setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE)) .setSize(0) + .setTrackTotalHits(true) .request(), new ActionListener() { @Override @@ -578,6 +579,7 @@ void getAllReservedUserInfo(ActionListener> listen securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> 
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareSearch(SECURITY_INDEX_NAME) + .setTrackTotalHits(true) .setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)) .setFetchSource(true).request(), new ActionListener() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index a36f830ceacbc..cbc66235d305b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -253,6 +253,7 @@ public void usageStats(ActionListener> listener) { client.prepareMultiSearch() .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .setTrackTotalHits(true) .setSize(0)) .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setQuery(QueryBuilders.boolQuery() @@ -262,12 +263,14 @@ public void usageStats(ActionListener> listener) { .should(existsQuery("indices.field_security.except")) // for backwardscompat with 2.x .should(existsQuery("indices.fields")))) + .setTrackTotalHits(true) .setSize(0) .setTerminateAfter(1)) .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setQuery(QueryBuilders.boolQuery() .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) .filter(existsQuery("indices.query"))) + .setTrackTotalHits(true) .setSize(0) .setTerminateAfter(1)) .request(), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 16a6a4135b472..ff02ed85818fe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -111,8 +111,13 @@ public void query(Schema schema, QueryContainer query, String index, ActionListe } public static SearchRequest prepareRequest(Client client, SearchSourceBuilder source, TimeValue timeout, String... indices) { - SearchRequest search = client.prepareSearch(indices).setSource(source).setTimeout(timeout).request(); - search.allowPartialSearchResults(false); + SearchRequest search = client.prepareSearch(indices) + // always track total hits accurately + .setTrackTotalHits(true) + .setAllowPartialSearchResults(false) + .setSource(source) + .setTimeout(timeout) + .request(); return search; } From 9e350d027e0a908b7e8a54fbd73141669cf48641 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 25 Jan 2019 13:50:19 +0100 Subject: [PATCH 41/64] Add BWC compatible processing to ingest date processors (#37407) The ingest date processor is currently only able to parse joda formats. However it is not using the existing elasticsearch classes but accesses joda directly. This means that our existing BWC layer does not notify the user about deprecated formats. This commit switches to using the existing Elasticsearch Joda methods to acquire a date format, which include the BWC check and the ability to parse java 8 dates.
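For example (a sketch only, not part of the committed diff; the pattern strings come from DateProcessorTests below, and the snippet assumes the test's package and imports so the package-private getFunction is visible), both patterns are handled by the same DateFormat.Java entry, but only the "8"-prefixed one is parsed via java time:

    // "dd/MM" stays on the joda path (with BWC deprecation checks),
    // "8dd/MM" opts in to the new java.time parsing path
    Function<String, DateTime> jodaPath = DateFormat.Java.getFunction("dd/MM", DateTimeZone.UTC, Locale.ENGLISH);
    Function<String, DateTime> javaPath = DateFormat.Java.getFunction("8dd/MM", DateTimeZone.UTC, Locale.ENGLISH);
    jodaPath.apply("12/06"); // joda parser, default year set to the current year
    javaPath.apply("12/06"); // java.time parser, same fallback year via toZonedDateTime()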
Date parsing in ingest also has an extra feature: when a date format without a year is used, the fallback year is the current year, not 1970 as usual. This is currently not properly supported in the DateFormatter class. As this is the only use case for this feature and java time can take care of it using the toZonedDateTime() method, a workaround just for the joda time parser has been created, which can soon be removed again from 7.0. --- .../ingest/common/DateFormat.java | 41 ++++++++++++++++--- .../ingest/common/DateFormatTests.java | 12 +++--- .../common/DateIndexNameProcessorTests.java | 2 +- .../ingest/common/DateProcessorTests.java | 5 ++- .../test/ingest/20_combine_processors.yml | 7 ++++ .../org/elasticsearch/common/joda/Joda.java | 13 +++--- .../common/joda/JodaDateFormatter.java | 19 +++++++-- 7 files changed, 75 insertions(+), 24 deletions(-) diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index bf664afb40777..220091c4baadc 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -19,12 +19,19 @@ package org.elasticsearch.ingest.common; +import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateUtils; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISODateTimeFormat; +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.TemporalAccessor; import java.util.Locale; import java.util.function.Function; @@ -63,11 +70,33 @@ private long parseMillis(String date) { return ((base * 1000) - 10000) + (rest/1000000); } }, - Joda { + Java { @Override Function getFunction(String format, DateTimeZone timezone, Locale locale) { - DateTimeFormatter parser = DateTimeFormat.forPattern(format).withZone(timezone).withLocale(locale); - return text -> parser.withDefaultYear((new DateTime(DateTimeZone.UTC)).getYear()).parseDateTime(text); + // in case you are wondering why we do not call 'DateFormatter.forPattern(format)' for all cases here, but only for the + // non java time case: + // When the joda date formatter parses a date then a year is always set, so that no fallback can be used, like + // done in the JodaDateFormatter.withYear() code below + // This means that we leave the existing parsing logic in place, but will fall back to the new java date parsing logic, if an + // "8" is prepended to the date format string + int year = LocalDate.now(ZoneOffset.UTC).getYear(); + if (format.startsWith("8")) { + DateFormatter formatter = DateFormatter.forPattern(format) + .withLocale(locale) + .withZone(DateUtils.dateTimeZoneToZoneId(timezone)); + return text -> { + ZonedDateTime defaultZonedDateTime = Instant.EPOCH.atZone(ZoneOffset.UTC).withYear(year); + TemporalAccessor accessor = formatter.parse(text); + long millis = DateFormatters.toZonedDateTime(accessor, defaultZonedDateTime).toInstant().toEpochMilli(); + return new DateTime(millis, timezone); + }; + } else { + DateFormatter formatter = Joda.forPattern(format) + .withYear(year) +
.withZone(DateUtils.dateTimeZoneToZoneId(timezone)) + .withLocale(locale); + return text -> new DateTime(formatter.parseMillis(text), timezone); + } } }; @@ -84,7 +113,7 @@ static DateFormat fromString(String format) { case "TAI64N": return Tai64n; default: - return Joda; + return Java; } } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java index 415ee8720930b..27904a5586e7e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java @@ -34,7 +34,7 @@ public class DateFormatTests extends ESTestCase { public void testParseJoda() { - Function jodaFunction = DateFormat.Joda.getFunction("MMM dd HH:mm:ss Z", + Function jodaFunction = DateFormat.Java.getFunction("MMM dd HH:mm:ss Z", DateTimeZone.forOffsetHours(-8), Locale.ENGLISH); assertThat(Instant.ofEpochMilli(jodaFunction.apply("Nov 24 01:29:01 -0800").getMillis()) .atZone(ZoneId.of("GMT-8")) @@ -78,13 +78,13 @@ public void testTAI64NParse() { public void testFromString() { assertThat(DateFormat.fromString("UNIX_MS"), equalTo(DateFormat.UnixMs)); - assertThat(DateFormat.fromString("unix_ms"), equalTo(DateFormat.Joda)); + assertThat(DateFormat.fromString("unix_ms"), equalTo(DateFormat.Java)); assertThat(DateFormat.fromString("UNIX"), equalTo(DateFormat.Unix)); - assertThat(DateFormat.fromString("unix"), equalTo(DateFormat.Joda)); + assertThat(DateFormat.fromString("unix"), equalTo(DateFormat.Java)); assertThat(DateFormat.fromString("ISO8601"), equalTo(DateFormat.Iso8601)); - assertThat(DateFormat.fromString("iso8601"), equalTo(DateFormat.Joda)); + assertThat(DateFormat.fromString("iso8601"), equalTo(DateFormat.Java)); assertThat(DateFormat.fromString("TAI64N"), equalTo(DateFormat.Tai64n)); - assertThat(DateFormat.fromString("tai64n"), equalTo(DateFormat.Joda)); - assertThat(DateFormat.fromString("prefix-" + randomAlphaOfLengthBetween(1, 10)), equalTo(DateFormat.Joda)); + assertThat(DateFormat.fromString("tai64n"), equalTo(DateFormat.Java)); + assertThat(DateFormat.fromString("prefix-" + randomAlphaOfLengthBetween(1, 10)), equalTo(DateFormat.Java)); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java index c97da116e3489..6555628f1da15 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java @@ -35,7 +35,7 @@ public class DateIndexNameProcessorTests extends ESTestCase { public void testJodaPattern() throws Exception { - Function function = DateFormat.Joda.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSZ", DateTimeZone.UTC, Locale.ROOT); + Function function = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSZ", DateTimeZone.UTC, Locale.ROOT); DateIndexNameProcessor processor = createProcessor("_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "y", "yyyyMMdd"); IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index 23aac797859e7..0f31143c43d0e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -109,7 +109,7 @@ public void testInvalidJodaPattern() { fail("date processor execution should have failed"); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("unable to parse date [2010]")); - assertThat(e.getCause().getMessage(), equalTo("Illegal pattern component: i")); + assertThat(e.getCause().getMessage(), equalTo("Invalid format: [invalid pattern]: Illegal pattern component: i")); } } @@ -127,9 +127,10 @@ public void testJodaPatternLocale() { } public void testJodaPatternDefaultYear() { + String format = randomFrom("dd/MM", "8dd/MM"); DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH), - "date_as_string", Collections.singletonList("dd/MM"), "date_as_date"); + "date_as_string", Collections.singletonList(format), "date_as_date"); Map document = new HashMap<>(); document.put("date_as_string", "12/06"); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml index 85fa6db10ed17..f44e6ae5753a0 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml @@ -1,5 +1,10 @@ --- "Test logging": + - skip: + version: " - 6.9.99" + reason: pre-7.0.0 will send no warnings + features: "warnings" + - do: ingest.put_pipeline: id: "_id" @@ -41,6 +46,8 @@ - match: { acknowledged: true } - do: + warnings: + - "Use of 'Y' (year-of-era) will change to 'y' in the next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier." 
index: index: test type: test diff --git a/server/src/main/java/org/elasticsearch/common/joda/Joda.java b/server/src/main/java/org/elasticsearch/common/joda/Joda.java index 3c99b65a9a54f..45587f6bb3df9 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/server/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -107,8 +107,8 @@ public static JodaDateFormatter forPattern(String input) { // in this case, we have a separate parser and printer since the dataOptionalTimeParser can't print // this sucks we should use the root local by default and not be dependent on the node return new JodaDateFormatter(input, - ISODateTimeFormat.dateOptionalTimeParser().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC), - ISODateTimeFormat.dateTime().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC)); + ISODateTimeFormat.dateOptionalTimeParser().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC).withDefaultYear(1970), + ISODateTimeFormat.dateTime().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC).withDefaultYear(1970)); } else if ("dateTime".equals(input) || "date_time".equals(input)) { formatter = ISODateTimeFormat.dateTime(); } else if ("dateTimeNoMillis".equals(input) || "date_time_no_millis".equals(input)) { @@ -184,8 +184,9 @@ public static JodaDateFormatter forPattern(String input) { // in this case, we have a separate parser and printer since the dataOptionalTimeParser can't print // this sucks we should use the root local by default and not be dependent on the node return new JodaDateFormatter(input, - StrictISODateTimeFormat.dateOptionalTimeParser().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC), - StrictISODateTimeFormat.dateTime().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC)); + StrictISODateTimeFormat.dateOptionalTimeParser().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC) + .withDefaultYear(1970), + StrictISODateTimeFormat.dateTime().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC).withDefaultYear(1970)); } else if ("strictDateTime".equals(input) || "strict_date_time".equals(input)) { formatter = StrictISODateTimeFormat.dateTime(); } else if ("strictDateTimeNoMillis".equals(input) || "strict_date_time_no_millis".equals(input)) { @@ -262,7 +263,7 @@ public static JodaDateFormatter forPattern(String input) { } } - formatter = formatter.withLocale(Locale.ROOT).withZone(DateTimeZone.UTC); + formatter = formatter.withLocale(Locale.ROOT).withZone(DateTimeZone.UTC).withDefaultYear(1970); return new JodaDateFormatter(input, formatter, formatter); } @@ -311,7 +312,7 @@ public static DateFormatter getStrictStandardDateFormatter() { DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)}); - DateTimeFormatter formatter = builder.toFormatter().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC); + DateTimeFormatter formatter = builder.toFormatter().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC).withDefaultYear(1970); return new JodaDateFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", formatter, formatter); } diff --git a/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java index 706e995530962..1b428fdac3a20 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java @@ 
-39,10 +39,10 @@ public class JodaDateFormatter implements DateFormatter { final DateTimeFormatter parser; final DateTimeFormatter printer; - public JodaDateFormatter(String pattern, DateTimeFormatter parser, DateTimeFormatter printer) { + JodaDateFormatter(String pattern, DateTimeFormatter parser, DateTimeFormatter printer) { this.pattern = pattern; - this.printer = printer.withDefaultYear(1970); - this.parser = parser.withDefaultYear(1970); + this.printer = printer; + this.parser = parser; } @Override @@ -62,6 +62,9 @@ public DateTime parseJoda(String input) { @Override public DateFormatter withZone(ZoneId zoneId) { DateTimeZone timeZone = DateUtils.zoneIdToDateTimeZone(zoneId); + if (parser.getZone().equals(timeZone)) { + return this; + } DateTimeFormatter parser = this.parser.withZone(timeZone); DateTimeFormatter printer = this.printer.withZone(timeZone); return new JodaDateFormatter(pattern, parser, printer); @@ -69,6 +72,9 @@ public DateFormatter withZone(ZoneId zoneId) { @Override public DateFormatter withLocale(Locale locale) { + if (parser.getLocale().equals(locale)) { + return this; + } DateTimeFormatter parser = this.parser.withLocale(locale); DateTimeFormatter printer = this.printer.withLocale(locale); return new JodaDateFormatter(pattern, parser, printer); @@ -89,6 +95,13 @@ public String formatMillis(long millis) { return printer.print(millis); } + public JodaDateFormatter withYear(int year) { + if (parser.getDefaultYear() == year) { + return this; + } + return new JodaDateFormatter(pattern, parser.withDefaultYear(year), printer.withDefaultYear(year)); + } + @Override public String pattern() { return pattern; From cb451edb0147db2d8d643ccc21eaa33413831e1b Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 25 Jan 2019 14:00:39 +0100 Subject: [PATCH 42/64] Allow nested fields in the composite aggregation (#37178) This change adds support for handling `nested` fields in the `composite` aggregation. A `nested` aggregation can be used as the parent of a `composite` aggregation in order to target `nested` fields in the `sources`. Closes #28611 --- .../bucket/composite-aggregation.asciidoc | 20 ++++- .../test/search.aggregation/230_composite.yml | 77 ++++++++++++++++++- .../CompositeAggregationBuilder.java | 22 +++++- .../nested/NestedAggregatorFactory.java | 2 +- 4 files changed, 112 insertions(+), 9 deletions(-) diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index eb56fa6f8500c..6d09379e16993 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -31,7 +31,24 @@ PUT /sales }, "shop": { "type": "keyword" - } + }, + "nested": { + "type": "nested", + "properties": { + "product": { + "type": "keyword" + }, + "timestamp": { + "type": "date" + }, + "price": { + "type": "long" + }, + "shop": { + "type": "keyword" + } + } + } } } } @@ -287,7 +304,6 @@ GET /_search -------------------------------------------------- // CONSOLE - This will create composite buckets from the values created by two values source, a `date_histogram` and a `terms`. Each bucket is composed of two values, one for each value source defined in the aggregation.
Any type of combinations is allowed and the order in the array is preserved diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 325bdf8f18e22..73bf44cb5d589 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -13,13 +13,18 @@ setup: type: keyword long: type: long + nested: + type: nested + properties: + nested_long: + type: long - do: index: index: test type: doc id: 1 - body: { "keyword": "foo", "long": [10, 20] } + body: { "keyword": "foo", "long": [10, 20], "nested": [{"nested_long": 10}, {"nested_long": 20}] } - do: index: @@ -33,14 +38,14 @@ setup: index: test type: doc id: 3 - body: { "keyword": "bar", "long": [100, 0] } + body: { "keyword": "bar", "long": [100, 0], "nested": [{"nested_long": 10}, {"nested_long": 0}] } - do: index: index: test type: doc id: 4 - body: { "keyword": "bar", "long": [1000, 0] } + body: { "keyword": "bar", "long": [1000, 0], "nested": [{"nested_long": 1000}, {"nested_long": 20}] } - do: index: @@ -66,7 +71,6 @@ setup: version: " - 6.0.99" reason: this uses a new API that has been added in 6.1 - - do: search: rest_total_hits_as_int: true @@ -357,3 +361,68 @@ setup: } } ] + +--- +"Composite aggregation with nested parent": + - skip: + version: " - 6.99.99" + reason: the ability to set a nested parent aggregation was added in 7.0. + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + composite: + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.1.2.buckets: 4 } + - match: { aggregations.1.2.buckets.0.key.nested: 0 } + - match: { aggregations.1.2.buckets.0.doc_count: 1 } + - match: { aggregations.1.2.buckets.1.key.nested: 10 } + - match: { aggregations.1.2.buckets.1.doc_count: 2 } + - match: { aggregations.1.2.buckets.2.key.nested: 20 } + - match: { aggregations.1.2.buckets.2.doc_count: 2 } + - match: { aggregations.1.2.buckets.3.key.nested: 1000 } + - match: { aggregations.1.2.buckets.3.doc_count: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + composite: + after: { "nested": 10 } + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.1.2.buckets: 2 } + - match: { aggregations.1.2.buckets.0.key.nested: 20 } + - match: { aggregations.1.2.buckets.0.doc_count: 2 } + - match: { aggregations.1.2.buckets.1.key.nested: 1000 } + - match: { aggregations.1.2.buckets.1.doc_count: 1 } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 43e33fad93189..69910d21ed8ad 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import 
org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregatorFactory; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -151,11 +152,28 @@ public int size() { return size; } + /** + * Returns null if the provided factory and his parents are compatible with + * this aggregator or the instance of the parent's factory that is incompatible with + * the composite aggregation. + */ + private AggregatorFactory checkParentIsNullOrNested(AggregatorFactory factory) { + if (factory == null) { + return null; + } else if (factory instanceof NestedAggregatorFactory) { + return checkParentIsNullOrNested(factory.getParent()); + } else { + return factory; + } + } + @Override protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subfactoriesBuilder) throws IOException { - if (parent != null) { - throw new IllegalArgumentException("[composite] aggregation cannot be used with a parent aggregation"); + AggregatorFactory invalid = checkParentIsNullOrNested(parent); + if (invalid != null) { + throw new IllegalArgumentException("[composite] aggregation cannot be used with a parent aggregation of" + + " type: [" + invalid.getClass().getSimpleName() + "]"); } CompositeValuesSourceConfig[] configs = new CompositeValuesSourceConfig[sources.size()]; for (int i = 0; i < configs.length; i++) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java index dfbe18ba87b4f..6724ee7da30d6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java @@ -32,7 +32,7 @@ import java.util.List; import java.util.Map; -class NestedAggregatorFactory extends AggregatorFactory { +public class NestedAggregatorFactory extends AggregatorFactory { private final ObjectMapper parentObjectMapper; private final ObjectMapper childObjectMapper; From deafce1acda47f5d59de8ec2ec55b43ba986586f Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 25 Jan 2019 13:15:35 +0000 Subject: [PATCH 43/64] [ML] No need to add state doc mapping on job open in 7.x (#37759) When upgrading from 5.4 to any version from 5.5 through 6.7 (inclusive), it was necessary to ensure there was a mapping for type "doc" on the ML state index before opening a job. This was because 5.4 created a multi-type ML state index. In version 7.x we can be sure that any such 5.4 index is no longer in use. It would have had to be reindexed into the 6.x index format prior to the upgrade to version 7.x.
--- .../xpack/ml/action/TransportOpenJobAction.java | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index a5aed9b5b5957..820da6a621356 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -49,7 +49,6 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; @@ -459,19 +458,11 @@ public void onFailure(Exception e) { ); // Tell the job tracker to refresh the memory requirement for this job and all other jobs that have persistent tasks - ActionListener jobUpdateListener = ActionListener.wrap( + ActionListener getJobHandler = ActionListener.wrap( response -> memoryTracker.refreshJobMemoryAndAllOthers(jobParams.getJobId(), memoryRequirementRefreshListener), listener::onFailure ); - // Try adding state doc mapping - ActionListener getJobHandler = ActionListener.wrap( - response -> { - ElasticsearchMappings.addDocMappingIfMissing(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), - ElasticsearchMappings::stateMapping, client, state, jobUpdateListener); - }, listener::onFailure - ); - // Get the job config jobConfigProvider.getJob(jobParams.getJobId(), ActionListener.wrap( builder -> { From 49073dd2f680c0fde81c810b8da28a0af33f63e5 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Fri, 25 Jan 2019 14:22:48 +0100 Subject: [PATCH 44/64] Fail start on invalid index metadata (#37748) Nodes started with node.data=false and node.master=false can no longer start if they have index metadata. This avoids resurrecting old indexes into the cluster and ensures metadata is cleaned out before re-purposing a node that was previously a master or data node.
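For example (a minimal sketch, not part of the commit; it assumes an already-built Environment whose data path still contains index metadata from the node's previous life):

    // re-purposing a former master/data node as a coordinating-only node
    Settings settings = Settings.builder()
        .put(Node.NODE_MASTER_SETTING.getKey(), false)
        .put(Node.NODE_DATA_SETTING.getKey(), false)
        .build();
    // startup now fails fast with an IllegalStateException:
    // "Node is started with node.data=false and node.master=false, but has index metadata: [...]"
    new NodeEnvironment(settings, environment);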
Issue #27073 --- .../elasticsearch/env/NodeEnvironment.java | 44 +++++++++--- .../elasticsearch/env/NodeEnvironmentIT.java | 28 +++++++- .../env/NodeEnvironmentTests.java | 67 ++++++++++++++++--- 3 files changed, 119 insertions(+), 20 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 2f676eb846770..397d1ee1763dd 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -312,6 +312,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } if (DiscoveryNode.isDataNode(settings) == false) { + if (DiscoveryNode.isMasterNode(settings) == false) { + ensureNoIndexMetaData(nodePaths); + } + ensureNoShardData(nodePaths); } @@ -1037,7 +1041,29 @@ private static void ensureAtomicMoveSupported(final NodePath[] nodePaths) throws } private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { - List shardDataPaths = new ArrayList<>(); + List shardDataPaths = collectIndexSubPaths(nodePaths, this::isShardPath); + if (shardDataPaths.isEmpty() == false) { + throw new IllegalStateException("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data: " + + shardDataPaths); + } + } + + private void ensureNoIndexMetaData(final NodePath[] nodePaths) throws IOException { + List indexMetaDataPaths = collectIndexSubPaths(nodePaths, this::isIndexMetaDataPath); + if (indexMetaDataPaths.isEmpty() == false) { + throw new IllegalStateException("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false and " + + Node.NODE_MASTER_SETTING.getKey() + + "=false, but has index metadata: " + + indexMetaDataPaths); + } + } + + private List collectIndexSubPaths(NodePath[] nodePaths, Predicate subPathPredicate) throws IOException { + List indexSubPaths = new ArrayList<>(); for (NodePath nodePath : nodePaths) { Path indicesPath = nodePath.indicesPath; if (Files.isDirectory(indicesPath)) { @@ -1045,9 +1071,9 @@ private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { for (Path indexPath : indexStream) { if (Files.isDirectory(indexPath)) { try (Stream shardStream = Files.list(indexPath)) { - shardStream.filter(this::isShardPath) + shardStream.filter(subPathPredicate) .map(Path::toAbsolutePath) - .forEach(shardDataPaths::add); + .forEach(indexSubPaths::add); } } } @@ -1055,12 +1081,7 @@ private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { } } - if (shardDataPaths.isEmpty() == false) { - throw new IllegalStateException("Node is started with " - + Node.NODE_DATA_SETTING.getKey() - + "=false, but has shard data: " - + shardDataPaths); - } + return indexSubPaths; } private boolean isShardPath(Path path) { @@ -1068,6 +1089,11 @@ private boolean isShardPath(Path path) { && path.getFileName().toString().chars().allMatch(Character::isDigit); } + private boolean isIndexMetaDataPath(Path path) { + return Files.isDirectory(path) + && path.getFileName().toString().equals(MetaDataStateFormat.STATE_DIR_NAME); + } + /** * Resolve the custom path for a index's shard. 
* Uses the {@code IndexMetaData.SETTING_DATA_PATH} setting to determine diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index bdca86858701f..36f75c79a1792 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -42,15 +42,37 @@ public void testStartFailureOnDataForNonDataNode() throws Exception { ).get(); final String indexUUID = resolveIndex(indexName).getUUID(); + logger.info("--> restarting the node with node.data=false and node.master=false"); + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Node started with node.data=false and node.master=false while having existing index metadata must fail", + () -> + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build(); + } + })); + assertThat(ex.getMessage(), containsString(indexUUID)); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false and " + + Node.NODE_MASTER_SETTING.getKey() + + "=false, but has index metadata")); + + // client() also starts the node logger.info("--> indexing a simple document"); client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); - logger.info("--> restarting the node with node.data=true"); + logger.info("--> restarting the node with node.data=true and node.master=true"); internalCluster().restartRandomDataNode(); logger.info("--> restarting the node with node.data=false"); - IllegalStateException ex = expectThrows(IllegalStateException.class, - "Node started with node.data=false and existing shard data must fail", + ex = expectThrows(IllegalStateException.class, + "Node started with node.data=false while having existing shard data must fail", () -> internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 2771bc9f243ac..a667514fa7ef2 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -472,45 +472,96 @@ public void testExistingTempFiles() throws IOException { } } - public void testEnsureNoShardData() throws IOException { + public void testEnsureNoShardDataOrIndexMetaData() throws IOException { Settings settings = buildEnvSettings(Settings.EMPTY); Index index = new Index("test", "testUUID"); + // build settings using same path.data as original but with node.data=false and node.master=false + Settings noDataNoMasterSettings = Settings.builder() + .put(settings) + .put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build(); + + // test that we can create data=false and master=false with no meta information + newNodeEnvironment(noDataNoMasterSettings).close(); + + Path indexPath; try (NodeEnvironment env = newNodeEnvironment(settings)) { for (Path path : env.indexPaths(index)) { Files.createDirectories(path.resolve(MetaDataStateFormat.STATE_DIR_NAME)); } + indexPath = env.indexPaths(index)[0]; } + verifyFailsOnMetaData(noDataNoMasterSettings, 
indexPath); + // build settings using same path.data as original but with node.data=false Settings noDataSettings = Settings.builder() .put(settings) .put(Node.NODE_DATA_SETTING.getKey(), false).build(); String shardDataDirName = Integer.toString(randomInt(10)); - Path shardPath; - // test that we can create data=false env with only meta information + // test that we can create data=false env with only meta information. Also create shard data for following asserts try (NodeEnvironment env = newNodeEnvironment(noDataSettings)) { for (Path path : env.indexPaths(index)) { Files.createDirectories(path.resolve(shardDataDirName)); } - shardPath = env.indexPaths(index)[0]; } + verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName); + + // assert that we get the stricter message on meta-data when both conditions fail + verifyFailsOnMetaData(noDataNoMasterSettings, indexPath); + + // build settings using same path.data as original but with node.master=false + Settings noMasterSettings = Settings.builder() + .put(settings) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build(); + + // test that we can create master=false env regardless of data. + newNodeEnvironment(noMasterSettings).close(); + + // test that we can create data=true, master=true env. Also remove state dir to leave only shard data for following asserts + try (NodeEnvironment env = newNodeEnvironment(settings)) { + for (Path path : env.indexPaths(index)) { + Files.delete(path.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + } + } + + // assert that we fail on shard data even without the metadata dir. + verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName); + verifyFailsOnShardData(noDataNoMasterSettings, indexPath, shardDataDirName); + } + + private void verifyFailsOnShardData(Settings settings, Path indexPath, String shardDataDirName) { IllegalStateException ex = expectThrows(IllegalStateException.class, "Must fail creating NodeEnvironment on a data path that has shard data if node.data=false", - () -> newNodeEnvironment(noDataSettings).close()); + () -> newNodeEnvironment(settings).close()); assertThat(ex.getMessage(), - containsString(shardPath.resolve(shardDataDirName).toAbsolutePath().toString())); + containsString(indexPath.resolve(shardDataDirName).toAbsolutePath().toString())); assertThat(ex.getMessage(), startsWith("Node is started with " + Node.NODE_DATA_SETTING.getKey() + "=false, but has shard data")); + } - // test that we can create data=true env - newNodeEnvironment(settings).close(); + private void verifyFailsOnMetaData(Settings settings, Path indexPath) { + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Must fail creating NodeEnvironment on a data path that has index meta-data if node.data=false and node.master=false", + () -> newNodeEnvironment(settings).close()); + + assertThat(ex.getMessage(), + containsString(indexPath.resolve(MetaDataStateFormat.STATE_DIR_NAME).toAbsolutePath().toString())); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false and " + + Node.NODE_MASTER_SETTING.getKey() + + "=false, but has index metadata")); } /** Converts an array of Strings to an array of Paths, adding an additional child if specified */ From b4b4cd6ebd09c88d857b50f43b9b7b3331ead5d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 25 Jan 2019 14:23:02 +0100 Subject: [PATCH 45/64] Clean codebase from empty statements (#37822) * Remove empty statements There are a couple of instances 
of undocumented empty statements all across the code base. While they are mostly harmless, they make the code hard to read and are potentially error-prone. Removing most of these instances and marking blocks that look empty by intention as such. * Change test, slightly more verbose but less confusing --- .../client/core/MultiTermVectorsRequest.java | 2 +- .../client/ml/job/config/DetectionRule.java | 2 +- .../common/xcontent/ObjectParserTests.java | 2 +- .../common/KeepTypesFilterFactory.java | 2 +- .../index/rankeval/MetricDetail.java | 2 +- ...AsyncBulkByScrollActionScriptTestCase.java | 2 +- .../common/settings/Settings.java | 4 +- .../common/util/CollectionUtils.java | 3 +- .../analysis/PreBuiltCacheFactory.java | 2 +- .../script/ScoreScriptUtils.java | 2 +- .../query/EarlyTerminatingCollector.java | 2 +- .../search/sort/FieldSortBuilder.java | 2 +- .../restore/RestoreSnapshotRequestTests.java | 2 +- .../routing/GroupShardsIteratorTests.java | 2 +- .../cluster/service/TaskBatcherTests.java | 10 +++- .../common/geo/GeoDistanceTests.java | 2 +- .../geo/builders/CircleBuilderTests.java | 5 +- .../common/io/stream/BytesStreamsTests.java | 2 +- .../concurrent/AsyncIOProcessorTests.java | 4 +- .../index/engine/LiveVersionMapTests.java | 2 +- .../index/mapper/KeywordFieldMapperTests.java | 4 +- .../functionscore/FunctionScoreTests.java | 2 +- .../reindex/DeleteByQueryRequestTests.java | 4 +- .../reindex/UpdateByQueryRequestTests.java | 4 +- .../InternalPercentilesBucketTests.java | 4 -- .../search/sort/NestedSortBuilderTests.java | 2 +- .../test/EqualsHashCodeTestUtils.java | 4 +- .../index/engine/FrozenEngine.java | 2 +- .../process/autodetect/state/DataCounts.java | 22 ++++---- .../xpack/core/scheduler/Cron.java | 4 +- .../ml/job/config/AnalysisLimitsTests.java | 3 +- .../ml/job/config/DataDescriptionTests.java | 3 +- .../ml/integration/DatafeedJobsRestIT.java | 2 +- .../ml/action/TransportForecastJobAction.java | 2 +- .../job/process/normalizer/Normalizable.java | 2 +- .../xpack/sql/analysis/analyzer/Analyzer.java | 4 +- .../xpack/sql/expression/AttributeMap.java | 2 +- .../scalar/string/StringProcessor.java | 8 +-- .../xpack/sql/optimizer/Optimizer.java | 50 +++++++++---------- .../xpack/sql/parser/SqlParser.java | 4 +- .../xpack/sql/session/Cursors.java | 2 +- 41 files changed, 97 insertions(+), 93 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java index 8ec5e79993cd8..4008e50aa4fd8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java @@ -37,7 +37,7 @@ public class MultiTermVectorsRequest implements ToXContentObject, Validatable { * Constructs an empty MultiTermVectorsRequest * After that use {@code add} method to add individual {@code TermVectorsRequest} to it. 
*/ - public MultiTermVectorsRequest() {}; + public MultiTermVectorsRequest() {} /** * Constructs a MultiTermVectorsRequest from the given document ids diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java index bcba8a7d74a61..7adeca1543fe0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/DetectionRule.java @@ -38,7 +38,7 @@ public class DetectionRule implements ToXContentObject { public static final ParseField CONDITIONS_FIELD = new ParseField("conditions"); public static final ObjectParser PARSER = - new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), true, Builder::new);; + new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), true, Builder::new); static { PARSER.declareStringArray(Builder::setActions, ACTIONS_FIELD); diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 889f1619614aa..e089b8a956ac8 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -297,7 +297,7 @@ public void setObjectArray(List objectArray) { enum TestEnum { FOO, BAR - }; + } public void testParseEnumFromString() throws IOException { class TestStruct { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepTypesFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepTypesFilterFactory.java index b6b8b45fabfc2..f0b069c76a757 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepTypesFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepTypesFilterFactory.java @@ -69,7 +69,7 @@ private static KeepTypesMode fromString(String modeString) { + KeepTypesMode.EXCLUDE + "] but was [" + modeString + "]."); } } - }; + } KeepTypesFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java index bc95b03c8bd13..da77825c0c867 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java @@ -37,7 +37,7 @@ default XContentBuilder toXContent(XContentBuilder builder, Params params) throw innerToXContent(builder, params); builder.endObject(); return builder.endObject(); - }; + } default String getMetricName() { return getWriteableName(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java index d452ea23bc155..c9eba1927ef15 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java +++ 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java @@ -60,7 +60,7 @@ protected T applyScript(Consumer> public void execute() { scriptBody.accept(getCtx()); } - };; + }; when(scriptService.compile(any(), eq(UpdateScript.CONTEXT))).thenReturn(factory); AbstractAsyncBulkByScrollAction action = action(scriptService, request().setScript(mockScript(""))); RequestWrapper result = action.buildScriptApplier().apply(AbstractAsyncBulkByScrollAction.wrap(index), doc); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 107c7bb9b8a59..72f9406edac4c 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.settings; import org.apache.logging.log4j.Level; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; @@ -44,6 +43,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.IOException; import java.io.InputStream; @@ -60,12 +60,12 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.ListIterator; import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Set; import java.util.TreeMap; -import java.util.ListIterator; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Predicate; diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index ce1bfe87131ba..a93664b517f84 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.util; import com.carrotsearch.hppc.ObjectArrayList; + import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; @@ -301,8 +302,8 @@ public T get(int index) { public int size() { return in.size(); } + } - }; public static void sort(final BytesRefArray bytes, final int[] indices) { sort(new BytesRefBuilder(), new BytesRefBuilder(), bytes, indices); } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java index 38ae484a42b24..7539c1653cce4 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java @@ -36,7 +36,7 @@ public class PreBuiltCacheFactory { * ELASTICSEARCH Exactly one version per elasticsearch version is stored. 
Useful if you change an analyzer between elasticsearch * releases, when the lucene version does not change */ - public enum CachingStrategy { ONE, LUCENE, ELASTICSEARCH }; + public enum CachingStrategy { ONE, LUCENE, ELASTICSEARCH } public interface PreBuiltCache { diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index f1358bc3c6ba1..273b8fcf8559d 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -132,7 +132,7 @@ public DecayGeoGauss(String originStr, String scaleStr, String offsetStr, double this.originLat = origin.lat(); this.originLon = origin.lon(); this.offset = DistanceUnit.DEFAULT.parse(offsetStr, DistanceUnit.DEFAULT); - this.scaling = 0.5 * Math.pow(scale, 2.0) / Math.log(decay);; + this.scaling = 0.5 * Math.pow(scale, 2.0) / Math.log(decay); } public double decayGeoGauss(GeoPoint docValue) { diff --git a/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java b/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java index 2cfcdf1ae669d..0633f1c6bbe97 100644 --- a/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java +++ b/server/src/main/java/org/elasticsearch/search/query/EarlyTerminatingCollector.java @@ -78,7 +78,7 @@ public void collect(int doc) throws IOException { } } super.collect(doc); - }; + } }; } diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 6d58917d84b8f..9bd1efbe757f3 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -93,7 +93,7 @@ public FieldSortBuilder(FieldSortBuilder template) { this.setNestedPath(template.getNestedPath()); if (template.getNestedSort() != null) { this.setNestedSort(template.getNestedSort()); - }; + } } /** diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index fbe8761a07d12..59518b6e9966f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -76,7 +76,7 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) { int count = randomInt(3) + 1; for (int i = 0; i < count; ++i) { - indexSettings.put(randomAlphaOfLengthBetween(2, 5), randomAlphaOfLengthBetween(2, 5));; + indexSettings.put(randomAlphaOfLengthBetween(2, 5), randomAlphaOfLengthBetween(2, 5)); } instance.indexSettings(indexSettings); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java index 66eabd4cbd921..7012b7af68c78 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/GroupShardsIteratorTests.java @@ -83,5 +83,5 @@ public ShardRouting newRouting(Index index, int id, boolean started) { shardRouting = 
ShardRoutingHelper.moveToStarted(shardRouting); } return shardRouting; - }; + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java index ebb15b42b7a3a..ee31b9d85f7b9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java @@ -34,8 +34,10 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -260,8 +262,10 @@ public void testSingleBatchSubmission() throws InterruptedException { Map tasks = new HashMap<>(); final int numOfTasks = randomInt(10); final CountDownLatch latch = new CountDownLatch(numOfTasks); + Set usedKeys = new HashSet<>(numOfTasks); for (int i = 0; i < numOfTasks; i++) { - while (null != tasks.put(randomInt(1024), new TestListener() { + int key = randomValueOtherThanMany(k -> usedKeys.contains(k), () -> randomInt(1024)); + tasks.put(key, new TestListener() { @Override public void processed(String source) { latch.countDown(); @@ -271,8 +275,10 @@ public void processed(String source) { public void onFailure(String source, Exception e) { fail(ExceptionsHelper.detailedMessage(e)); } - })) ; + }); + usedKeys.add(key); } + assert usedKeys.size() == numOfTasks; TestExecutor executor = taskList -> { assertThat(taskList.size(), equalTo(tasks.size())); diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java index c2e62277da74f..9b3a2d9ce44b0 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoDistanceTests.java @@ -45,7 +45,7 @@ public void testGeoDistanceSerialization() throws IOException { GeoDistance geoDistance = randomFrom(GeoDistance.PLANE, GeoDistance.ARC); try (BytesStreamOutput out = new BytesStreamOutput()) { geoDistance.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) {; + try (StreamInput in = out.bytes().streamInput()) { GeoDistance copy = GeoDistance.readFromStream(in); assertEquals(copy.toString() + " vs. 
" + geoDistance.toString(), copy, geoDistance); } diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index b3892d9d551f5..10ac6e1b014f9 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -19,9 +19,8 @@ package org.elasticsearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; - import org.elasticsearch.common.unit.DistanceUnit; +import org.locationtech.jts.geom.Coordinate; import java.io.IOException; @@ -59,7 +58,7 @@ static CircleBuilder mutate(CircleBuilder original) throws IOException { DistanceUnit newRandom = unit; while (newRandom == unit) { newRandom = randomFrom(DistanceUnit.values()); - }; + } unit = newRandom; } return mutation.radius(radius, unit); diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index adfe90755dd03..430cd900660c3 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -570,7 +570,7 @@ protected byte[] randomizedByteArrayWithSize(int size) { } public void testReadWriteGeoPoint() throws IOException { - try (BytesStreamOutput out = new BytesStreamOutput()) {; + try (BytesStreamOutput out = new BytesStreamOutput()) { GeoPoint geoPoint = new GeoPoint(randomDouble(), randomDouble()); out.writeGenericValue(geoPoint); StreamInput wrap = out.bytes().streamInput(); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java index 387f15e3f3319..72a1e21d78865 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessorTests.java @@ -67,7 +67,7 @@ public void run() { } catch (Exception ex) { throw new RuntimeException(ex); } - }; + } }; thread[i].start(); } @@ -120,7 +120,7 @@ public void run() { } catch (Exception ex) { throw new RuntimeException(ex); } - }; + } }; thread[i].start(); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 115785b2e7b96..e45ddd87ba3cf 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -392,7 +392,7 @@ public void testAddAndDeleteRefreshConcurrently() throws IOException, Interrupte public void testPruneTombstonesWhileLocked() throws InterruptedException, IOException { LiveVersionMap map = new LiveVersionMap(); BytesRef uid = uid("1"); - ; + try (Releasable ignore = map.acquireLock(uid)) { map.putDeleteUnderLock(uid, new DeleteVersionValue(0, 0, 0, 0)); map.beforeRefresh(); // refresh otherwise we won't prune since it's tracked by the current map diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index d7f8d48fc5cf3..dd7cb17ef127c 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -51,8 +51,8 @@ import java.util.Map; import static java.util.Collections.singletonList; -import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; import static java.util.Collections.singletonMap; +import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -81,7 +81,7 @@ public Tokenizer create() { }); } - }; + } @Override protected Collection> getPlugins() { diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index fa0a372a2adb8..c99886c3f660d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -861,6 +861,6 @@ protected boolean doEquals(ScoreFunction other) { @Override protected int doHashCode() { return 0; - }; + } } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java index 8c84c8f3f5680..76c6dc03b5a49 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryRequestTests.java @@ -56,10 +56,10 @@ public void testDeleteteByQueryRequestImplementsIndicesRequestReplaceable() { newIndices[i] = randomSimpleString(random(), 1, 30); } request.indices(newIndices); - for (int i = 0; i < numNewIndices; i++) {; + for (int i = 0; i < numNewIndices; i++) { assertEquals(newIndices[i], request.indices()[i]); } - for (int i = 0; i < numNewIndices; i++) {; + for (int i = 0; i < numNewIndices; i++) { assertEquals(newIndices[i], request.getSearchRequest().indices()[i]); } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java index 47449eb739199..207f0f12ff23d 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -49,10 +49,10 @@ public void testUpdateByQueryRequestImplementsIndicesRequestReplaceable() { newIndices[i] = randomSimpleString(random(), 1, 30); } request.indices(newIndices); - for (int i = 0; i < numNewIndices; i++) {; + for (int i = 0; i < numNewIndices; i++) { assertEquals(newIndices[i], request.indices()[i]); } - for (int i = 0; i < numNewIndices; i++) {; + for (int i = 0; i < numNewIndices; i++) { assertEquals(newIndices[i], request.getSearchRequest().indices()[i]); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java index 78ef2e1df392e..6ce0c07f96ff3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java @@ -28,9 +28,6 @@ import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.Percentile; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.pipeline.InternalPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; import org.elasticsearch.test.InternalAggregationTestCase; import java.io.IOException; @@ -192,7 +189,6 @@ protected InternalPercentilesBucket mutateInstance(InternalPercentilesBucket ins String name = instance.getName(); double[] percents = extractPercents(instance); double[] percentiles = extractPercentiles(instance); - ; DocValueFormat formatter = instance.formatter(); List pipelineAggregators = instance.pipelineAggregators(); Map metaData = instance.getMetaData(); diff --git a/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java index b0613b320b86a..e644a070faecd 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/NestedSortBuilderTests.java @@ -152,7 +152,7 @@ public void testRewrite() throws IOException { @Override protected QueryBuilder doRewrite(org.elasticsearch.index.query.QueryRewriteContext queryShardContext) throws IOException { return new MatchAllQueryBuilder(); - }; + } }; // test that filter gets rewritten NestedSortBuilder original = new NestedSortBuilder("path").setFilter(filterThatRewrites); diff --git a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java index 44b845e5c2cac..d08b9541ffb2a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java @@ -40,7 +40,7 @@ public class EqualsHashCodeTestUtils { */ public interface CopyFunction { T copy(T t) throws IOException; - }; + } /** * A function that creates a copy of its input argument that is different from its @@ -48,7 +48,7 @@ public interface CopyFunction { */ public interface MutateFunction { T mutate(T t) throws IOException; - }; + } /** * Perform common equality and hashCode checks on the input object diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java index 513400cb72c3c..7fbb58504efc9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -324,7 +324,7 @@ private LazyDirectoryReader(DirectoryReader reader, FrozenEngine engine) throws @Override public LeafReader wrap(LeafReader reader) { return new LazyLeafReader(reader); - }; + } }); this.delegate = reader; this.engine = engine; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java index b13e702e85de3..08a6d8846f9a3 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java @@ -45,14 +45,14 @@ public class DataCounts implements ToXContentObject, Writeable { public static final String MISSING_FIELD_COUNT_STR = "missing_field_count"; public static final String OUT_OF_ORDER_TIME_COUNT_STR = "out_of_order_timestamp_count"; public static final String EMPTY_BUCKET_COUNT_STR = "empty_bucket_count"; - public static final String SPARSE_BUCKET_COUNT_STR = "sparse_bucket_count"; + public static final String SPARSE_BUCKET_COUNT_STR = "sparse_bucket_count"; public static final String BUCKET_COUNT_STR = "bucket_count"; public static final String EARLIEST_RECORD_TIME_STR = "earliest_record_timestamp"; public static final String LATEST_RECORD_TIME_STR = "latest_record_timestamp"; public static final String LAST_DATA_TIME_STR = "last_data_time"; public static final String LATEST_EMPTY_BUCKET_TIME_STR = "latest_empty_bucket_timestamp"; public static final String LATEST_SPARSE_BUCKET_TIME_STR = "latest_sparse_bucket_timestamp"; - + public static final ParseField PROCESSED_RECORD_COUNT = new ParseField(PROCESSED_RECORD_COUNT_STR); public static final ParseField PROCESSED_FIELD_COUNT = new ParseField(PROCESSED_FIELD_COUNT_STR); public static final ParseField INPUT_BYTES = new ParseField(INPUT_BYTES_STR); @@ -68,7 +68,7 @@ public class DataCounts implements ToXContentObject, Writeable { public static final ParseField LATEST_RECORD_TIME = new ParseField(LATEST_RECORD_TIME_STR); public static final ParseField LAST_DATA_TIME = new ParseField(LAST_DATA_TIME_STR); public static final ParseField LATEST_EMPTY_BUCKET_TIME = new ParseField(LATEST_EMPTY_BUCKET_TIME_STR); - public static final ParseField LATEST_SPARSE_BUCKET_TIME = new ParseField(LATEST_SPARSE_BUCKET_TIME_STR); + public static final ParseField LATEST_SPARSE_BUCKET_TIME = new ParseField(LATEST_SPARSE_BUCKET_TIME_STR); public static final ParseField TYPE = new ParseField("data_counts"); @@ -99,7 +99,7 @@ public class DataCounts implements ToXContentObject, Writeable { p -> TimeUtils.parseTimeField(p, LATEST_EMPTY_BUCKET_TIME.getPreferredName()), LATEST_EMPTY_BUCKET_TIME, ValueType.VALUE); PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> TimeUtils.parseTimeField(p, LATEST_SPARSE_BUCKET_TIME.getPreferredName()), LATEST_SPARSE_BUCKET_TIME, ValueType.VALUE); - PARSER.declareLong((t, u) -> {;}, INPUT_RECORD_COUNT); + PARSER.declareLong((t, u) -> {/* intentionally empty */}, INPUT_RECORD_COUNT); } public static String documentId(String jobId) { @@ -131,7 +131,7 @@ public static String v54DocumentId(String jobId) { public DataCounts(String jobId, long processedRecordCount, long processedFieldCount, long inputBytes, long inputFieldCount, long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, long emptyBucketCount, long sparseBucketCount, long bucketCount, - Date earliestRecordTimeStamp, Date latestRecordTimeStamp, Date lastDataTimeStamp, + Date earliestRecordTimeStamp, Date latestRecordTimeStamp, Date lastDataTimeStamp, Date latestEmptyBucketTimeStamp, Date latestSparseBucketTimeStamp) { this.jobId = jobId; this.processedRecordCount = processedRecordCount; @@ -195,7 +195,7 @@ public DataCounts(StreamInput in) throws IOException { if (in.readBoolean()) { lastDataTimeStamp = new Date(in.readVLong()); } - if (in.readBoolean()) { + if (in.readBoolean()) { 
latestEmptyBucketTimeStamp = new Date(in.readVLong()); } if (in.readBoolean()) { @@ -346,9 +346,9 @@ public long getEmptyBucketCount() { public void incrementEmptyBucketCount(long additional) { emptyBucketCount += additional; } - + /** - * The number of buckets with few records compared to the overall counts. + * The number of buckets with few records compared to the overall counts. * Used to measure general data fitness and/or configuration problems (bucket span). * * @return Number of sparse buckets processed by this job {@code long} @@ -360,7 +360,7 @@ public long getSparseBucketCount() { public void incrementSparseBucketCount(long additional) { sparseBucketCount += additional; } - + /** * The number of buckets overall. * @@ -443,7 +443,7 @@ public Date getLatestEmptyBucketTimeStamp() { public void setLatestEmptyBucketTimeStamp(Date latestEmptyBucketTimeStamp) { this.latestEmptyBucketTimeStamp = latestEmptyBucketTimeStamp; } - + public void updateLatestEmptyBucketTimeStamp(Date latestEmptyBucketTimeStamp) { if (latestEmptyBucketTimeStamp != null && (this.latestEmptyBucketTimeStamp == null || @@ -472,7 +472,7 @@ public void updateLatestSparseBucketTimeStamp(Date latestSparseBucketTimeStamp) this.latestSparseBucketTimeStamp = latestSparseBucketTimeStamp; } } - + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java index d4ccc22d32ab4..607b49be1f453 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -1214,7 +1214,7 @@ private static String expressionSetSummary(java.util.Set set) { private static int skipWhiteSpace(int i, String s) { for (; i < s.length() && (s.charAt(i) == ' ' || s.charAt(i) == '\t'); i++) { - ; + // intentionally empty } return i; @@ -1222,7 +1222,7 @@ private static int skipWhiteSpace(int i, String s) { private static int findNextWhiteSpace(int i, String s) { for (; i < s.length() && (s.charAt(i) != ' ' || s.charAt(i) != '\t'); i++) { - ; + // intentionally empty } return i; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java index f73fcc41bce89..05b7e2719fb4f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java @@ -167,6 +167,7 @@ public void testVerify_GivenValid() { new AnalysisLimits(1L, 1L); } + @Override protected AnalysisLimits mutateInstance(AnalysisLimits instance) throws IOException { Long memoryModelLimit = instance.getModelMemoryLimit(); Long categorizationExamplesLimit = instance.getCategorizationExamplesLimit(); @@ -197,5 +198,5 @@ protected AnalysisLimits mutateInstance(AnalysisLimits instance) throws IOExcept throw new AssertionError("Illegal randomisation branch"); } return new AnalysisLimits(memoryModelLimit, categorizationExamplesLimit); - }; + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java index 
99cf3df31b4cb..9d92b2999c501 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java @@ -282,6 +282,7 @@ protected DataDescription doParseInstance(XContentParser parser) { return DataDescription.STRICT_PARSER.apply(parser, null).build(); } + @Override protected DataDescription mutateInstance(DataDescription instance) throws java.io.IOException { DataFormat format = instance.getFormat(); String timeField = instance.getTimeField(); @@ -320,5 +321,5 @@ protected DataDescription mutateInstance(DataDescription instance) throws java.i throw new AssertionError("Illegal randomisation branch"); } return new DataDescription(format, timeField, timeFormat, delimiter, quoteChar); - }; + } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java index b794fee311805..e721b5b88fdf8 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -239,7 +239,7 @@ private void addNetworkData(String index) throws IOException { + " \"network_bytes_out\": { \"type\":\"long\"}" + " }" + " }" - + "}");; + + "}"); client().performRequest(createIndexRequest); StringBuilder bulk = new StringBuilder(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index 3a26f9f863cdb..ac3d41f26e010 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -78,7 +78,7 @@ protected void taskOperation(ForecastJobAction.Request request, TransportOpenJob ForecastParams params = paramsBuilder.build(); processManager.forecastJob(task, params, e -> { if (e == null) { -; getForecastRequestStats(request.getJobId(), params.getForecastId(), listener); + getForecastRequestStats(request.getJobId(), params.getForecastId(), listener); } else { listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizable.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizable.java index 0d88372de17e4..8a2a57d1a46dc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizable.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizable.java @@ -11,7 +11,7 @@ import java.util.Objects; public abstract class Normalizable implements ToXContentObject { - public enum ChildType {BUCKET_INFLUENCER, RECORD}; + public enum ChildType {BUCKET_INFLUENCER, RECORD} private final String indexName; private boolean hadBigNormalizedUpdate; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 097758976da84..f0137a250cc79 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -745,7 +745,7 @@ private static UnresolvedAttribute resolveMetadataToMessage(UnresolvedAttribute } } return ua; - }; + } } // to avoid creating duplicate functions @@ -920,7 +920,7 @@ protected LogicalPlan rule(Project p) { } return p; } - }; + } // // Handle aggs in HAVING. To help folding any aggs not found in Aggregation diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java index b5d137617722e..9f64246a514f7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java @@ -138,7 +138,7 @@ public A[] toArray(A[] a) { public String toString() { return set.toString(); } - }; + } private final Map delegate; private Set keySet = null; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringProcessor.java index fb9f152504bd3..6f8d7c93fc633 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringProcessor.java @@ -17,7 +17,7 @@ import java.util.function.Function; public class StringProcessor implements Processor { - + private interface StringFunction { default R apply(Object o) { if (!(o instanceof String || o instanceof Character)) { @@ -57,11 +57,11 @@ public enum StringOperation { int i = n.intValue(); if (i < 0) { return null; - }; + } char[] spaces = new char[i]; char whitespace = ' '; Arrays.fill(spaces, whitespace); - + return new String(spaces); }), BIT_LENGTH((String s) -> UnicodeUtil.calcUTF16toUTF8Length(s, 0, s.length()) * 8), @@ -91,7 +91,7 @@ public String toString() { return this == CHAR ? "character" : super.toString(); } } - + public static final String NAME = "s"; private final StringOperation processor; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 1865fd4eea126..38313fa613a0e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -1136,7 +1136,7 @@ protected Expression rule(Expression e) { return e.foldable() ? Literal.of(e) : e; } } - + static class SimplifyConditional extends OptimizerExpressionRule { SimplifyConditional() { @@ -1355,7 +1355,7 @@ private Expression literalToTheRight(BinaryOperator be) { * Propagate Equals to eliminate conjuncted Ranges. * When encountering a different Equals or non-containing {@link Range}, the conjunction becomes false. * When encountering a containing {@link Range}, the range gets eliminated by the equality. - * + * * This rule doesn't perform any promotion of {@link BinaryComparison}s, that is handled by * {@link CombineBinaryComparisons} on purpose as the resulting Range might be foldable * (which is picked by the folding rule on the next run). 
@@ -1420,7 +1420,7 @@ private Expression propagate(And and) { continue; } Object eqValue = eq.right().fold(); - + for (int i = 0; i < ranges.size(); i++) { Range range = ranges.get(i); @@ -1448,14 +1448,14 @@ private Expression propagate(And and) { return FALSE; } } - + // it's in the range and thus, remove it ranges.remove(i); changed = true; } } } - + return changed ? Predicates.combineAnd(CollectionUtils.combine(exps, equals, ranges)) : and; } } @@ -1475,7 +1475,7 @@ protected Expression rule(Expression e) { } return e; } - + // combine conjunction private Expression combine(And and) { List ranges = new ArrayList<>(); @@ -1504,7 +1504,7 @@ private Expression combine(And and) { exps.add(ex); } } - + // finally try combining any left BinaryComparisons into possible Ranges // this could be a different rule but it's clearer here wrt the order of comparisons @@ -1513,14 +1513,14 @@ private Expression combine(And and) { for (int j = i + 1; j < bcs.size(); j++) { BinaryComparison other = bcs.get(j); - + if (main.left().semanticEquals(other.left())) { // >/>= AND ranges, boolean private boolean findConjunctiveComparisonInRange(BinaryComparison main, List ranges) { Object value = main.right().fold(); - + // NB: the loop modifies the list (hence why the int is used) for (int i = 0; i < ranges.size(); i++) { Range other = ranges.get(i); - + if (main.left().semanticEquals(other.value())) { - + if (main instanceof GreaterThan || main instanceof GreaterThanOrEqual) { if (other.lower().foldable()) { Integer comp = BinaryComparison.compare(value, other.lower().fold()); @@ -1705,7 +1705,7 @@ private boolean findConjunctiveComparisonInRange(BinaryComparison main, List 2 < a < 3 boolean lower = comp > 0 || lowerEq; - + if (lower) { ranges.remove(i); ranges.add(i, @@ -1745,14 +1745,14 @@ private boolean findConjunctiveComparisonInRange(BinaryComparison main, List bcs, boolean conjunctive) { Object value = main.right().fold(); - + // NB: the loop modifies the list (hence why the int is used) for (int i = 0; i < bcs.size(); i++) { BinaryComparison other = bcs.get(i); @@ -1763,10 +1763,10 @@ private static boolean findExistingComparison(BinaryComparison main, List T invokeParser(String sql, log.info(format(Locale.ROOT, " %-15s '%s'", symbolicName == null ? literalName : symbolicName, t.getText())); - }; + } } ParserRuleContext tree = parseFunction.apply(parser); @@ -145,7 +145,7 @@ private T invokeParser(String sql, } private static void debug(SqlBaseParser parser) { - + // when debugging, use the exact prediction mode (needed for diagnostics as well) parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java index 25989ab0af7d2..6a3de2cf174de 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -35,7 +35,7 @@ public final class Cursors { private static final NamedWriteableRegistry WRITEABLE_REGISTRY = new NamedWriteableRegistry(getNamedWriteables()); - private Cursors() {}; + private Cursors() {} /** * The {@link NamedWriteable}s required to deserialize {@link Cursor}s. 
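The hunks above all remove the same class of artifact: a stray ';' after a method body, interface body, enum, or anonymous class, which javac silently accepts as a redundant empty statement or empty type declaration. For reference, a minimal, self-contained sketch of the pattern (the names are invented for illustration, echoing the EqualsHashCodeTestUtils.CopyFunction hunk above):

public class EmptyStatementDemo {

    // A ';' after an interface (or class/enum) body is an extra, empty
    // declaration: legal, but meaningless, so the cleanup drops it.
    interface CopyFunction<T> {
        T copy(T t);
    };

    public static void main(String[] args) {
        int i = 0;
        while (i < 3) {
            i++;
        };  // a ';' after a statement block is likewise an empty statement
        System.out.println(i);  // prints 3 with or without the stray ';'
    }
}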
From ef8dd12c6d87f40e2c41da274bbb08136ae164e4 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 25 Jan 2019 15:09:27 +0100 Subject: [PATCH 46/64] Limit number of documents indexed in CloseIndexIT test This test indexes an unlimited number of documents; this commit reduces that number to 25K and also tracks the exact number of hits when counting the docs. --- .../org/elasticsearch/indices/state/CloseIndexIT.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index e00c5038ce273..a56a65f6e4deb 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -52,6 +52,8 @@ public class CloseIndexIT extends ESIntegTestCase { + private static final int MAX_DOCS = 25_000; + public void testCloseMissingIndex() { IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareClose("test").get()); assertThat(e.getMessage(), is("no such index [test]")); @@ -168,7 +170,7 @@ public void testCloseWhileIndexingDocuments() throws Exception { createIndex(indexName); int nbDocs = 0; - try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client())) { + try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), MAX_DOCS)) { indexer.setAssertNoFailuresOnStop(false); waitForDocs(randomIntBetween(10, 50), indexer); @@ -186,7 +188,7 @@ public void testCloseWhileIndexingDocuments() throws Exception { assertIndexIsClosed(indexName); assertAcked(client().admin().indices().prepareOpen(indexName)); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), nbDocs); + assertHitCount(client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(MAX_DOCS).get(), nbDocs); } public void testCloseWhileDeletingIndices() throws Exception { @@ -247,7 +249,7 @@ public void testConcurrentClosesAndOpens() throws Exception { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createIndex(indexName); - final BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client()); + final BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), MAX_DOCS); waitForDocs(1, indexer); final CountDownLatch latch = new CountDownLatch(1); @@ -299,7 +301,8 @@ public void testConcurrentClosesAndOpens() throws Exception { } refresh(indexName); assertIndexIsOpened(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexer.totalIndexedDocs()); + assertHitCount(client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(MAX_DOCS).get(), + indexer.totalIndexedDocs()); } static void assertIndexIsClosed(final String... indices) {
From 27c3fb8e0d8462e359418293c13818be101b4eb9 Mon Sep 17 00:00:00 2001 From: Vishnu Gt Date: Fri, 25 Jan 2019 20:26:34 +0530 Subject: [PATCH 47/64] Do not allow negative variances (#37384) Due to floating point error, it was possible for variances to become negative, which should never happen. This bugfix sets variance to zero if it becomes negative as a result of floating-point error.
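For illustration (an aside, not part of the change itself): these aggregators compute variance with the one-pass formula (sumOfSqrs - sum * sum / count) / count, and rounding in the two accumulated terms can leave a tiny negative result (on the order of -1e-13) for a series whose true variance is exactly zero; Math.sqrt of that negative value then turns the standard deviation into NaN. A minimal standalone sketch of the formula and the clamp this commit applies, using the same constant 49.95 as the new unit test:

public class NegativeVarianceDemo {

    // One-pass variance with the clamp introduced by this commit.
    static double variance(double[] values) {
        double sum = 0;
        double sumOfSqrs = 0;
        for (double v : values) {
            sum += v;
            sumOfSqrs += v * v;
        }
        double count = values.length;
        double variance = (sumOfSqrs - ((sum * sum) / count)) / count;
        return variance < 0 ? 0 : variance; // clamp floating-point error to zero
    }

    public static void main(String[] args) {
        double[] constant = {49.95, 49.95, 49.95}; // true variance is exactly 0
        double v = variance(constant);
        System.out.println(v);            // 0.0 once clamped
        System.out.println(Math.sqrt(v)); // standard deviation stays a real number
    }
}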
--- .../metrics/ExtendedStatsAggregator.java | 3 +- .../metrics/InternalExtendedStats.java | 3 +- .../metrics/ExtendedStatsAggregatorTests.java | 31 ++++++++++++++++++- .../aggregations/metrics/ExtendedStatsIT.java | 3 +- .../pipeline/ExtendedStatsBucketIT.java | 1 + 5 files changed, 37 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java index 1d383a2ae1946..4774bec573e42 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregator.java @@ -202,7 +202,8 @@ public double metric(String name, long owningBucketOrd) { private double variance(long owningBucketOrd) { double sum = sums.get(owningBucketOrd); long count = counts.get(owningBucketOrd); - return (sumOfSqrs.get(owningBucketOrd) - ((sum * sum) / count)) / count; + double variance = (sumOfSqrs.get(owningBucketOrd) - ((sum * sum) / count)) / count; + return variance < 0 ? 0 : variance; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java index 608fd1de435c8..26a244c8ddfb8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStats.java @@ -101,7 +101,8 @@ public double getSumOfSquares() { @Override public double getVariance() { - return (sumOfSqrs - ((sum * sum) / count)) / count; + double variance = (sumOfSqrs - ((sum * sum) / count)) / count; + return variance < 0 ? 
0 : variance; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java index ca26ba1b20672..83713ff52af84 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java @@ -99,6 +99,34 @@ public void testRandomDoubles() throws IOException { ); } + /** + * Testcase for https://github.com/elastic/elasticsearch/issues/37303 + */ + public void testVarianceNonNegative() throws IOException { + MappedFieldType ft = + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + ft.setName("field"); + final ExtendedSimpleStatsAggregator expected = new ExtendedSimpleStatsAggregator(); + testCase(ft, + iw -> { + int numDocs = 3; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + double value = 49.95d; + long valueAsLong = NumericUtils.doubleToSortableLong(value); + doc.add(new SortedNumericDocValuesField("field", valueAsLong)); + expected.add(value); + iw.addDocument(doc); + } + }, + stats -> { + //since the value(49.95) is a constant, variance should be 0 + assertEquals(0.0d, stats.getVariance(), TOLERANCE); + assertEquals(0.0d, stats.getStdDeviation(), TOLERANCE); + } + ); + } + public void testRandomLongs() throws IOException { MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); @@ -236,7 +264,8 @@ void add(double value) { } double variance() { - return (sumOfSqrs - ((sum * sum) / count)) / count; + double variance = (sumOfSqrs - ((sum * sum) / count)) / count; + return variance < 0 ? 0 : variance; } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 4aa16d6f1d5ca..bdf678174967b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -73,7 +73,8 @@ private static double variance(int... vals) { sum += val; sumOfSqrs += val * val; } - return (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length; + double variance = (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length; + return variance < 0 ? 0 : variance; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index a8ebf687ad623..9155947e3b6de 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -137,6 +137,7 @@ public void testGappyIndexWithSigma() { double sumOfSqrs = 1.0 + 1.0 + 1.0 + 4.0 + 0.0 + 1.0; double avg = sum / count; double var = (sumOfSqrs - ((sum * sum) / count)) / count; + var = var < 0 ? 
0 : var; double stdDev = Math.sqrt(var); assertThat(extendedStatsBucketValue, notNullValue()); assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket"));
From 85acc11ef7a183b97eebd35015039e7a5326bb14 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 25 Jan 2019 16:26:16 +0100 Subject: [PATCH 48/64] AsyncTwoPhaseIndexerTests race condition fixed (#37830) Unlucky timing can cause this test to fail when the indexing is triggered from `maybeTriggerAsyncJob`. As this is asynchronous, it can finish more quickly than the test can step through to the next assertion. The introduced barrier solves the problem. Closes #37695 --- .../indexing/AsyncTwoPhaseIndexerTests.java | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index a3826bcf7cd7b..cfbac18dc9787 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -21,9 +21,11 @@ import java.io.IOException; import java.util.Collections; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -35,11 +37,14 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { private class MockIndexer extends AsyncTwoPhaseIndexer { + private final CountDownLatch latch; // test the execution order private int step; - protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition) { + protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition, + CountDownLatch latch) { super(executor, initialState, initialPosition, new MockJobStats()); + this.latch = latch; } @Override @@ -49,11 +54,20 @@ protected String getJobId() { @Override protected IterationResult doProcess(SearchResponse searchResponse) { + awaitForLatch(); assertThat(step, equalTo(3)); ++step; return new IterationResult(Collections.emptyList(), 3, true); } + private void awaitForLatch() { + try { + latch.await(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + @Override protected SearchRequest buildSearchRequest() { assertThat(step, equalTo(1)); @@ -196,12 +210,14 @@ public void testStateMachine() throws InterruptedException { final ExecutorService executor = Executors.newFixedThreadPool(1); isFinished.set(false); try { - - MockIndexer indexer = new MockIndexer(executor, state, 2); + CountDownLatch countDownLatch = new CountDownLatch(1); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + countDownLatch.countDown(); + assertThat(indexer.getPosition(), equalTo(2)); ESTestCase.awaitBusy(() -> isFinished.get()); assertThat(indexer.getStep(), equalTo(6));
From f1f54e0f6137e55c403da2bb25e27b98c38f2ade Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 25 Jan 2019 16:31:26
+0100 Subject: [PATCH 49/64] TransportUnfollowAction should increase settings version (#37859) The TransportUnfollowAction updates the index settings but does not increase the settings version to reflect that change. This issue was caught while working on the replication of closed indices (#33888). The IndexFollowingIT.testUnfollowIndex() started to fail and this specific assertion tripped. It does not happen on the master branch today because index metadata for closed indices is never updated in IndexService instances, but this is something that is going to change with the replication of closed indices. --- .../xpack/ccr/action/TransportUnfollowAction.java | 9 +++++---- .../xpack/ccr/action/TransportUnfollowActionTests.java | 3 +++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java index a42ff658dc9e1..3a158aceddb2d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java @@ -105,18 +105,19 @@ static ClusterState unfollow(String followerIndex, ClusterState current) { } } - IndexMetaData.Builder newIMD = IndexMetaData.builder(followerIMD); // Remove index.xpack.ccr.following_index setting Settings.Builder builder = Settings.builder(); builder.put(followerIMD.getSettings()); builder.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); - newIMD.settings(builder); + final IndexMetaData.Builder newIndexMetaData = IndexMetaData.builder(followerIMD); + newIndexMetaData.settings(builder); + newIndexMetaData.settingsVersion(followerIMD.getSettingsVersion() + 1); // Remove ccr custom metadata - newIMD.removeCustom(Ccr.CCR_CUSTOM_METADATA_KEY); + newIndexMetaData.removeCustom(Ccr.CCR_CUSTOM_METADATA_KEY); MetaData newMetaData = MetaData.builder(current.metaData()) - .put(newIMD) + .put(newIndexMetaData) .build(); return ClusterState.builder(current) .metaData(newMetaData) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index 9b9d088eea332..93987a7306f45 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -30,8 +30,10 @@ public class TransportUnfollowActionTests extends ESTestCase { public void testUnfollow() { + final long settingsVersion = randomNonNegativeLong(); IndexMetaData.Builder followerIndex = IndexMetaData.builder("follow_index") .settings(settings(Version.CURRENT).put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)) + .settingsVersion(settingsVersion) .numberOfShards(1) .numberOfReplicas(0) .state(IndexMetaData.State.CLOSE) @@ -47,6 +49,7 @@ public void testUnfollow() { IndexMetaData resultIMD = result.metaData().index("follow_index"); assertThat(resultIMD.getSettings().get(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()), nullValue()); assertThat(resultIMD.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY), nullValue()); + assertThat(resultIMD.getSettingsVersion(), equalTo(settingsVersion + 1)); } public void testUnfollowIndexOpen() {
From dfecb256cb566de2913cd00a040e289f80040428 Mon Sep 17
00:00:00 2001 From: Martijn Laarman Date: Fri, 25 Jan 2019 16:44:33 +0100 Subject: [PATCH 50/64] Exit batch files explicitly using ERRORLEVEL (#29583) * Exit batch files explicitly using ERRORLEVEL This makes sure the exit code is preserved when calling the batch files from different contexts other than DOS. Fixes #29582 This also fixes specific error codes being masked by an explicit exit /b 1, causing the useful exit codes from ExitCodes to be lost. * fix line breaks for calling cli to match the bash scripts * indent size of bash files is 2; make sure editorconfig does the same for bat files * update indenting to match bash files * update elasticsearch-keystore.bat indenting * Update elasticsearch-node.bat to exit outside of endlocal --- .editorconfig | 3 +++ distribution/src/bin/elasticsearch-cli.bat | 2 ++ distribution/src/bin/elasticsearch-keystore.bat | 4 +++- distribution/src/bin/elasticsearch-node.bat | 4 +++- distribution/src/bin/elasticsearch-plugin.bat | 5 ++++- distribution/src/bin/elasticsearch-service.bat | 2 ++ distribution/src/bin/elasticsearch-shard.bat | 4 +++- distribution/src/bin/elasticsearch.bat | 1 + .../plugin/security/src/main/bin/elasticsearch-certgen.bat | 4 +++- .../plugin/security/src/main/bin/elasticsearch-certutil.bat | 4 +++- .../plugin/security/src/main/bin/elasticsearch-migrate.bat | 4 +++- .../security/src/main/bin/elasticsearch-saml-metadata.bat | 4 +++- .../security/src/main/bin/elasticsearch-setup-passwords.bat | 4 +++- .../plugin/security/src/main/bin/elasticsearch-syskeygen.bat | 4 +++- x-pack/plugin/security/src/main/bin/elasticsearch-users.bat | 4 +++- x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat | 1 + .../plugin/watcher/src/main/bin/elasticsearch-croneval.bat | 4 +++- 17 files changed, 46 insertions(+), 12 deletions(-) diff --git a/.editorconfig b/.editorconfig index 9d4bfbf55d3a1..c3a32bede3261 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,3 +8,6 @@ indent_style = space indent_size = 4 trim_trailing_whitespace = true insert_final_newline = true + +[*.bat] +indent_size = 2 diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat index e17ade3b74af1..405f97ccc8dbf 100644 --- a/distribution/src/bin/elasticsearch-cli.bat +++ b/distribution/src/bin/elasticsearch-cli.bat @@ -21,3 +21,5 @@ if defined ES_ADDITIONAL_CLASSPATH_DIRECTORIES ( -cp "%ES_CLASSPATH%" ^ "%ES_MAIN_CLASS%" ^ %* + +exit /b %ERRORLEVEL% diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index b43182a273f6b..83372248fb61a 100644 --- a/distribution/src/bin/elasticsearch-keystore.bat +++ b/distribution/src/bin/elasticsearch-keystore.bat @@ -6,7 +6,9 @@ setlocal enableextensions set ES_MAIN_CLASS=org.elasticsearch.common.settings.KeyStoreCli call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/distribution/src/bin/elasticsearch-node.bat b/distribution/src/bin/elasticsearch-node.bat index 264a357cb8af4..b152331d5ef89 100644 --- a/distribution/src/bin/elasticsearch-node.bat +++ b/distribution/src/bin/elasticsearch-node.bat @@ -6,7 +6,9 @@ setlocal enableextensions set ES_MAIN_CLASS=org.elasticsearch.cluster.coordination.NodeToolCli call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index 7e71de790f03e..e447c7e847cf2 100644
--- a/distribution/src/bin/elasticsearch-plugin.bat +++ b/distribution/src/bin/elasticsearch-plugin.bat @@ -7,7 +7,10 @@ set ES_MAIN_CLASS=org.elasticsearch.plugins.PluginCli set ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit + endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index f566c34c958ae..fc62c07ac9d3c 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -258,3 +258,5 @@ goto:eof endlocal endlocal + +exit /b %ERRORLEVEL% diff --git a/distribution/src/bin/elasticsearch-shard.bat b/distribution/src/bin/elasticsearch-shard.bat index e861b197e873d..4db48f141fd6c 100644 --- a/distribution/src/bin/elasticsearch-shard.bat +++ b/distribution/src/bin/elasticsearch-shard.bat @@ -6,7 +6,9 @@ setlocal enableextensions set ES_MAIN_CLASS=org.elasticsearch.index.shard.ShardToolCli call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index 6e268c9b13321..9b67fa2e0ffa6 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -55,3 +55,4 @@ cd /d "%ES_HOME%" endlocal endlocal +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat index bb303f740e5c3..d268ea04290dd 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat @@ -12,7 +12,9 @@ set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env set ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/security-cli call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat index 34f595824f82d..40dc4f5c29b4f 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat @@ -12,7 +12,9 @@ set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env set ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/security-cli call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat index 4b8e4f926d797..a50bc1a384ed0 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat @@ -11,7 +11,9 @@ set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmM set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat index 64a272dfbb5a6..f39aedaff02e0 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat @@ -11,7 
+11,9 @@ set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.saml.SamlMetadataComman set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat index 3c956ca47ba26..b650fcf84830f 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat @@ -11,7 +11,9 @@ set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.tool.SetupPass set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat index 11414872d073b..31fc871a5f7d0 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat @@ -11,7 +11,9 @@ set ES_MAIN_CLASS=org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat index 9b35895ed86c1..b2600adfad282 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat @@ -11,7 +11,9 @@ set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.file.tool.UsersTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat b/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat index cf159f0322363..48daf97b515ab 100644 --- a/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat +++ b/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat @@ -22,3 +22,4 @@ set CLI_JAR=%ES_HOME%/bin/* endlocal endlocal +exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat index 2b4a33c9f9e75..571c19056bb96 100644 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat @@ -11,7 +11,9 @@ set ES_MAIN_CLASS=org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEval set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-watcher-env call "%~dp0elasticsearch-cli.bat" ^ %%* ^ - || exit /b 1 + || goto exit endlocal endlocal +:exit +exit /b %ERRORLEVEL% From a644bc095c347b9c823b880f9b7ac12dbaec8d47 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 25 Jan 2019 16:51:53 +0100 Subject: [PATCH 51/64] Add unit tests for ShardStateAction's ShardStartedClusterStateTaskExecutor (#37756) --- .../ClusterAllocationExplainActionTests.java | 14 +- .../ClusterStateCreationUtils.java | 2 + ...rdFailedClusterStateTaskExecutorTests.java | 3 +- ...dStartedClusterStateTaskExecutorTests.java | 197 ++++++++++++++++++ .../action/shard/ShardStateActionTests.java | 144 
++++++------- 5 files changed, 281 insertions(+), 79 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index a75510cfb64ef..d0a55972cc1d8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.AllocationDecision; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; @@ -35,6 +37,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; +import java.time.Instant; import java.util.Collections; import java.util.Locale; @@ -85,7 +88,16 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing "wait until initialization has completed"; } assertEquals("{\"index\":\"idx\",\"shard\":0,\"primary\":true,\"current_state\":\"" + - shardRoutingState.toString().toLowerCase(Locale.ROOT) + "\",\"current_node\":" + + shardRoutingState.toString().toLowerCase(Locale.ROOT) + "\"" + + (shard.unassignedInfo() != null ? 
+ ",\"unassigned_info\":{" + + "\"reason\":\"" + shard.unassignedInfo().getReason() + "\"," + + "\"at\":\""+ UnassignedInfo.DATE_TIME_FORMATTER.format( + Instant.ofEpochMilli(shard.unassignedInfo().getUnassignedTimeInMillis())) + "\"," + + "\"last_allocation_status\":\"" + AllocationDecision.fromAllocationStatus( + shard.unassignedInfo().getLastAllocationStatus()) + "\"}" + : "") + + ",\"current_node\":" + "{\"id\":\"" + cae.getCurrentNode().getId() + "\",\"name\":\"" + cae.getCurrentNode().getName() + "\",\"transport_address\":\"" + cae.getCurrentNode().getAddress() + "\"},\"explanation\":\"" + explanation + "\"}", Strings.toString(builder)); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 60053748d68c9..6b628d88c59d4 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -118,6 +118,8 @@ public static ClusterState state(String index, boolean activePrimaryLocal, Shard } if (primaryState == ShardRoutingState.RELOCATING) { relocatingNode = selectAndRemove(unassignedNodes); + } else if (primaryState == ShardRoutingState.INITIALIZING) { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); } } else { unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 60a5d4a3e3f1f..4dbe62cf5cebb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -48,7 +48,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; -import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; @@ -73,7 +72,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa private ClusterState clusterState; private ShardStateAction.ShardFailedClusterStateTaskExecutor executor; - @Before + @Override public void setUp() throws Exception { super.setUp(); allocationService = createAllocationService(Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java new file mode 100644 index 0000000000000..1d3a523cdc94f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java @@ -0,0 +1,197 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.action.shard; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.action.shard.ShardStateAction.StartedShardEntry; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithAssignedPrimariesAndReplicas; +import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithNoShard; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class ShardStartedClusterStateTaskExecutorTests extends ESAllocationTestCase { + + private ShardStateAction.ShardStartedClusterStateTaskExecutor executor; + + @Override + public void setUp() throws Exception { + super.setUp(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE) + .build()); + executor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger); + } + + public void testEmptyTaskListProducesSameClusterState() throws Exception { + final ClusterState clusterState = stateWithNoShard(); + assertTasksExecution(clusterState, Collections.emptyList(), result -> assertSame(clusterState, result.resultingState)); + } + + public void testNonExistentIndexMarkedAsSuccessful() throws Exception { + final ClusterState clusterState = stateWithNoShard(); + final StartedShardEntry entry = new StartedShardEntry(new ShardId("test", "_na", 0), "aId", "test"); + assertTasksExecution(clusterState, singletonList(entry), + result -> { + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(1)); + assertThat(result.executionResults.containsKey(entry), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(entry)).isSuccess(), is(true)); + 
}); + } + + public void testNonExistentShardsAreMarkedAsSuccessful() throws Exception { + final String indexName = "test"; + final ClusterState clusterState = stateWithActivePrimary(indexName, true, randomInt(2), randomInt(2)); + + final IndexMetaData indexMetaData = clusterState.metaData().index(indexName); + final List tasks = Stream.concat( + // Existent shard id but different allocation id + IntStream.range(0, randomIntBetween(1, 5)) + .mapToObj(i -> new StartedShardEntry(new ShardId(indexMetaData.getIndex(), 0), String.valueOf(i), "allocation id")), + // Non existent shard id + IntStream.range(1, randomIntBetween(2, 5)) + .mapToObj(i -> new StartedShardEntry(new ShardId(indexMetaData.getIndex(), i), String.valueOf(i), "shard id")) + + ).collect(Collectors.toList()); + + assertTasksExecution(clusterState, tasks, result -> { + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + }); + }); + } + + public void testNonInitializingShardAreMarkedAsSuccessful() throws Exception { + final String indexName = "test"; + final ClusterState clusterState = stateWithAssignedPrimariesAndReplicas(new String[]{indexName}, randomIntBetween(2, 10), 1); + + final IndexMetaData indexMetaData = clusterState.metaData().index(indexName); + final List tasks = IntStream.range(0, randomIntBetween(1, indexMetaData.getNumberOfShards())) + .mapToObj(i -> { + final ShardId shardId = new ShardId(indexMetaData.getIndex(), i); + final IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().shardRoutingTable(shardId); + final String allocationId; + if (randomBoolean()) { + allocationId = shardRoutingTable.primaryShard().allocationId().getId(); + } else { + allocationId = shardRoutingTable.replicaShards().iterator().next().allocationId().getId(); + } + return new StartedShardEntry(shardId, allocationId, "test"); + }).collect(Collectors.toList()); + + assertTasksExecution(clusterState, tasks, result -> { + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + }); + }); + } + + public void testStartedShards() throws Exception { + final String indexName = "test"; + final ClusterState clusterState = state(indexName, randomBoolean(), ShardRoutingState.INITIALIZING, ShardRoutingState.INITIALIZING); + + final IndexMetaData indexMetaData = clusterState.metaData().index(indexName); + final ShardId shardId = new ShardId(indexMetaData.getIndex(), 0); + final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); + final String primaryAllocationId = primaryShard.allocationId().getId(); + + final List tasks = new ArrayList<>(); + tasks.add(new StartedShardEntry(shardId, primaryAllocationId, "test")); + if (randomBoolean()) { + final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().iterator().next(); + final String replicaAllocationId = replicaShard.allocationId().getId(); + tasks.add(new StartedShardEntry(shardId, replicaAllocationId, "test")); + } + assertTasksExecution(clusterState, tasks, 
result -> { + assertNotSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + + final IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); + }); + }); + } + + public void testDuplicateStartsAreOkay() throws Exception { + final String indexName = "test"; + final ClusterState clusterState = state(indexName, randomBoolean(), ShardRoutingState.INITIALIZING); + + final IndexMetaData indexMetaData = clusterState.metaData().index(indexName); + final ShardId shardId = new ShardId(indexMetaData.getIndex(), 0); + final ShardRouting shardRouting = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); + final String allocationId = shardRouting.allocationId().getId(); + + final List tasks = IntStream.range(0, randomIntBetween(2, 10)) + .mapToObj(i -> new StartedShardEntry(shardId, allocationId, "test")) + .collect(Collectors.toList()); + + assertTasksExecution(clusterState, tasks, result -> { + assertNotSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + + final IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); + }); + }); + } + + private void assertTasksExecution(final ClusterState state, + final List tasks, + final Consumer consumer) throws Exception { + final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(state, tasks); + assertThat(result, notNullValue()); + consumer.accept(result); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 2a994e2861836..e94a974ae7a89 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.action.shard; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterState; @@ -156,24 +157,9 @@ public void testSuccess() throws InterruptedException { setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); - AtomicBoolean success = new AtomicBoolean(); - CountDownLatch latch = new CountDownLatch(1); - + final TestListener listener = new TestListener(); ShardRouting shardRouting = getRandomShardRouting(index); - shardStateAction.localShardFailed(shardRouting, "test", getSimulatedFailure(), new ShardStateAction.Listener() { - @Override - public void onSuccess() { - success.set(true); - latch.countDown(); - } - - @Override - 
public void onFailure(Exception e) { - success.set(false); - latch.countDown(); - assert false; - } - }); + shardStateAction.localShardFailed(shardRouting, "test", getSimulatedFailure(), listener); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertEquals(1, capturedRequests.length); @@ -188,8 +174,8 @@ public void onFailure(Exception e) { transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); - latch.await(); - assertTrue(success.get()); + listener.await(); + assertNull(listener.failure.get()); } public void testNoMaster() throws InterruptedException { @@ -291,28 +277,14 @@ public void testUnhandledFailure() { setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); - AtomicBoolean failure = new AtomicBoolean(); - + final TestListener listener = new TestListener(); ShardRouting failedShard = getRandomShardRouting(index); - shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { - @Override - public void onSuccess() { - failure.set(false); - assert false; - } - - @Override - public void onFailure(Exception e) { - failure.set(true); - } - }); + shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), listener); final CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); - assertFalse(failure.get()); transport.handleRemoteError(capturedRequests[0].requestId, new TransportException("simulated")); - - assertTrue(failure.get()); + assertNotNull(listener.failure.get()); } public void testShardNotFound() throws InterruptedException { @@ -320,32 +292,17 @@ public void testShardNotFound() throws InterruptedException { setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); - AtomicBoolean success = new AtomicBoolean(); - CountDownLatch latch = new CountDownLatch(1); - + final TestListener listener = new TestListener(); ShardRouting failedShard = getRandomShardRouting(index); RoutingTable routingTable = RoutingTable.builder(clusterService.state().getRoutingTable()).remove(index).build(); setState(clusterService, ClusterState.builder(clusterService.state()).routingTable(routingTable)); - shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { - @Override - public void onSuccess() { - success.set(true); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - success.set(false); - latch.countDown(); - assert false; - } - }); + shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), listener); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); - latch.await(); - assertTrue(success.get()); + listener.await(); + assertNull(listener.failure.get()); } public void testNoLongerPrimaryShardException() throws InterruptedException { @@ -355,36 +312,23 @@ public void testNoLongerPrimaryShardException() throws InterruptedException { ShardRouting failedShard = getRandomShardRouting(index); - AtomicReference failure = new AtomicReference<>(); - CountDownLatch latch = new CountDownLatch(1); - + final TestListener listener = new TestListener(); long primaryTerm = clusterService.state().metaData().index(index).primaryTerm(failedShard.id()); 
assertThat(primaryTerm, greaterThanOrEqualTo(1L)); shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), - primaryTerm + 1, randomBoolean(), "test", getSimulatedFailure(), - new ShardStateAction.Listener() { - @Override - public void onSuccess() { - failure.set(null); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - failure.set(e); - latch.countDown(); - } - }); + primaryTerm + 1, randomBoolean(), "test", getSimulatedFailure(), listener); ShardStateAction.NoLongerPrimaryShardException catastrophicError = new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "dummy failure"); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); transport.handleRemoteError(capturedRequests[0].requestId, catastrophicError); - latch.await(); - assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(ShardStateAction.NoLongerPrimaryShardException.class)); - assertThat(failure.get().getMessage(), equalTo(catastrophicError.getMessage())); + listener.await(); + + final Exception failure = listener.failure.get(); + assertNotNull(failure); + assertThat(failure, instanceOf(ShardStateAction.NoLongerPrimaryShardException.class)); + assertThat(failure.getMessage(), equalTo(catastrophicError.getMessage())); } public void testCacheRemoteShardFailed() throws Exception { @@ -471,6 +415,26 @@ public void onFailure(Exception e) { masterThread.join(); } + public void testShardStarted() throws InterruptedException { + final String index = "test"; + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + + final ShardRouting shardRouting = getRandomShardRouting(index); + final TestListener listener = new TestListener(); + shardStateAction.shardStarted(shardRouting, "testShardStarted", listener); + + final CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); + assertThat(capturedRequests[0].request, instanceOf(ShardStateAction.StartedShardEntry.class)); + + ShardStateAction.StartedShardEntry entry = (ShardStateAction.StartedShardEntry) capturedRequests[0].request; + assertThat(entry.shardId, equalTo(shardRouting.shardId())); + assertThat(entry.allocationId, equalTo(shardRouting.allocationId().getId())); + + transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); + listener.await(); + assertNull(listener.failure.get()); + } + private ShardRouting getRandomShardRouting(String index) { IndexRoutingTable indexRoutingTable = clusterService.state().routingTable().index(index); ShardsIterator shardsIterator = indexRoutingTable.randomAllActiveShardsIt(); @@ -600,4 +564,32 @@ public void onFailure(Exception e) { } }); } + + private static class TestListener implements ShardStateAction.Listener { + + private final SetOnce failure = new SetOnce<>(); + private final CountDownLatch latch = new CountDownLatch(1); + + @Override + public void onSuccess() { + try { + failure.set(null); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(final Exception e) { + try { + failure.set(e); + } finally { + latch.countDown(); + } + } + + void await() throws InterruptedException { + latch.await(); + } + } } From acc3cae40c21f1807b1cbbe4663a8e7a432744da Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 25 Jan 2019 08:07:49 -0800 Subject: [PATCH 52/64] Remove "reinstall" packaging tests (#37851) The packaging tests currently have a test which installs 
elasticsearch, removes it, modifies ownership of /etc/elasticsearch, and reinstalls. It then checks that the /etc/elasticsearch directory has ownership that the package expects. But the recursive change touches files not owned by the package. In the past this worked because we did a recursive ownership change within the package postinst. However, that was recently removed, and thus this test no longer makes sense. --- .../packaging/tests/90_reinstall.bats | 78 ------------------- .../test/resources/packaging/utils/utils.bash | 11 --- 2 files changed, 89 deletions(-) delete mode 100644 qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats diff --git a/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats b/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats deleted file mode 100644 index 7c5f05c65e894..0000000000000 --- a/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bats - -# Tests upgrading elasticsearch from a previous version with the deb or rpm -# packages. Just uses a single node cluster on the current machine rather than -# fancy rolling restarts. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It should only be executed -# in a throw-away VM like those made by the Vagrantfile at -# the root of the Elasticsearch source code. This should -# cause the script to fail if it is executed any other way: -[ -f /etc/is_vagrant_vm ] || { - >&2 echo "must be run on a vagrant VM" - exit 1 -} - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-
-# Load test utilities
-load $BATS_UTILS/utils.bash
-load $BATS_UTILS/packages.bash
-
-# Cleans everything for the 1st execution
-setup() {
-    skip_not_dpkg_or_rpm
-    export PACKAGE_NAME="elasticsearch-oss"
-}
-
-@test "[REINSTALL] install" {
-    clean_before_test
-    install_package
-}
-
-@test "[REINSTALL] purge elasticsearch" {
-    purge_elasticsearch
-}
-
-@test "[REINSTALL] chown directories" {
-    # to simulate the loss of ownership
-    if [ -d /var/lib/elasticsearch ]; then
-        sudo chown -R root:root /var/lib/elasticsearch
-    fi
-    if [ -d "/var/log/elasticsearch" ]; then
-        sudo chown -R root:root /var/log/elasticsearch
-    fi
-    if [ -d /etc/elasticsearch ]; then
-        sudo chown -R root:root /etc/elasticsearch
-    fi
-}
-
-@test "[REINSTALL] reinstall elasticsearch" {
-    install_package
-}
-
-@test "[REINSTALL] check ownership" {
-    assert_recursive_ownership /var/lib/elasticsearch elasticsearch elasticsearch
-    assert_recursive_ownership /var/log/elasticsearch elasticsearch elasticsearch
-    assert_recursive_ownership /etc/elasticsearch root elasticsearch
-}
diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
index 18363a5ac6241..92363d4d4e348 100644
--- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash
+++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
@@ -233,17 +233,6 @@ assert_output() {
     echo "$output" | grep -E "$1"
 }
 
-assert_recursive_ownership() {
-    local directory=$1
-    local user=$2
-    local group=$3
-
-    realuser=$(find $directory -printf "%u\n" | sort | uniq)
-    [ "$realuser" = "$user" ]
-    realgroup=$(find $directory -printf "%g\n" | sort | uniq)
-    [ "$realgroup" = "$group" ]
-}
-
 # Deletes everything before running a test file
 clean_before_test() {

From e88ae99340505b607fb32b1a1509ff0afb6f3651 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Fri, 25 Jan 2019 08:11:48 -0800
Subject: [PATCH 53/64] Remove NOREPLACE for /etc/elasticsearch in rpm and deb
 (#37839)

The /etc/elasticsearch directory is currently configured as a config
file with noreplace. However, the directory itself is not a config
file, and marking it as one can lead to an entire
/etc/elasticsearch.rpmsave directory in some situations. This commit
fixes the ospackage config to not specify those file bits for the
directory itself, but only for the files underneath it.
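A quick way to sanity-check the resulting package metadata is to ask the
package managers directly for the tracked config files (a minimal sketch,
assuming the elasticsearch-oss package produced by this build is
installed):

    # rpm: list the paths the package marks as config; after this change
    # the files under /etc/elasticsearch should appear here, but not the
    # directory entry itself
    rpm -qc elasticsearch-oss

    # dpkg: conffiles are recorded in the package metadata
    dpkg-query --showformat='${Conffiles}\n' --show elasticsearch-oss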
--- distribution/packages/build.gradle | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index e0c9dafc5e9b4..0b573ed9bad13 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -83,6 +83,9 @@ void addProcessFilesTask(String type, boolean oss) { mkdir "${packagingFiles}/var/log/elasticsearch" mkdir "${packagingFiles}/var/lib/elasticsearch" mkdir "${packagingFiles}/usr/share/elasticsearch/plugins" + + // bare empty dir for /etc/elasticsearch + mkdir "${packagingFiles}/elasticsearch" } } } @@ -173,14 +176,22 @@ Closure commonPackageConfig(String type, boolean oss) { configurationFile '/etc/elasticsearch/users' configurationFile '/etc/elasticsearch/users_roles' } - into('/etc') { + from("${packagingFiles}") { + dirMode 02750 + into('/etc') + permissionGroup 'elasticsearch' + includeEmptyDirs true + createDirectoryEntry true + include("elasticsearch") // empty dir, just to add directory entry + } + from("${packagingFiles}/etc/elasticsearch") { + into('/etc/elasticsearch') dirMode 02750 fileMode 0660 permissionGroup 'elasticsearch' includeEmptyDirs true createDirectoryEntry true fileType CONFIG | NOREPLACE - from "${packagingFiles}/etc" } String envFile = expansionsForDistribution(type, false)['path.env'] configurationFile envFile From 68149b6058a720ec891974e8a6982c5e480fb9f1 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 25 Jan 2019 11:37:27 -0500 Subject: [PATCH 54/64] Geo: replace intermediate geo objects with libs/geo (#37721) Replaces intermediate geo objects built by ShapeBuilders with objects from the libs/geo hierarchy. This should allow us to build all geo functionality around a single hierarchy. Follow up for #35320 --- build.gradle | 1 + .../org/elasticsearch/geo/geometry/Line.java | 8 ++ .../org/elasticsearch/geo/geometry/Point.java | 4 +- .../geo/utils/WellKnownText.java | 6 +- server/build.gradle | 3 +- .../common/geo/GeoShapeType.java | 4 +- .../common/geo/builders/CircleBuilder.java | 4 +- .../common/geo/builders/EnvelopeBuilder.java | 6 +- .../builders/GeometryCollectionBuilder.java | 17 +-- .../geo/builders/LineStringBuilder.java | 22 +-- .../geo/builders/MultiLineStringBuilder.java | 28 ++-- .../geo/builders/MultiPointBuilder.java | 15 +- .../geo/builders/MultiPolygonBuilder.java | 35 ++--- .../common/geo/builders/PointBuilder.java | 12 +- .../common/geo/builders/PolygonBuilder.java | 55 +++---- .../common/geo/builders/ShapeBuilder.java | 7 +- .../index/mapper/GeoShapeFieldMapper.java | 131 ++++++++++++----- .../index/query/GeoShapeQueryBuilder.java | 135 +++++++++++------- .../common/geo/BaseGeoParsingTestCase.java | 2 +- .../common/geo/GeoJsonShapeParserTests.java | 132 +++++++++-------- .../common/geo/GeoWKTShapeParserTests.java | 48 ++++--- .../common/geo/ShapeBuilderTests.java | 116 +++++++-------- .../AbstractShapeBuilderTestCase.java | 4 +- .../GeometryCollectionBuilderTests.java | 2 +- .../index/mapper/ExternalMapper.java | 2 +- .../query/GeoPolygonQueryBuilderTests.java | 2 +- .../query/GeoShapeQueryBuilderTests.java | 12 +- .../query/LegacyGeoShapeFieldQueryTests.java | 4 +- .../hamcrest/ElasticsearchGeoAssertions.java | 13 +- 29 files changed, 465 insertions(+), 365 deletions(-) diff --git a/build.gradle b/build.gradle index c5611e8b453fb..4bd211a12b3b0 100644 --- a/build.gradle +++ b/build.gradle @@ -212,6 +212,7 @@ allprojects { "org.elasticsearch:elasticsearch-core:${version}": ':libs:core', 
"org.elasticsearch:elasticsearch-nio:${version}": ':libs:nio', "org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content', + "org.elasticsearch:elasticsearch-geo:${version}": ':libs:elasticsearch-geo', "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java index 415dacfce9b3c..348537205af55 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java @@ -67,6 +67,14 @@ public double getLon(int i) { return lons[i]; } + public double[] getLats() { + return lats.clone(); + } + + public double[] getLons() { + return lons.clone(); + } + @Override public ShapeType type() { return ShapeType.LINESTRING; diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java index d85d40c8dc789..c272261ee3a9c 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java @@ -48,11 +48,11 @@ public ShapeType type() { return ShapeType.POINT; } - public double lat() { + public double getLat() { return lat; } - public double lon() { + public double getLon() { return lon; } diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java index 5cf29065b006a..5fa585be28b24 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java @@ -123,12 +123,12 @@ public Void visit(MultiLine multiLine) { public Void visit(MultiPoint multiPoint) { // walk through coordinates: sb.append(LPAREN); - visitPoint(multiPoint.get(0).lon(), multiPoint.get(0).lat()); + visitPoint(multiPoint.get(0).getLon(), multiPoint.get(0).getLat()); for (int i = 1; i < multiPoint.size(); ++i) { sb.append(COMMA); sb.append(SPACE); Point point = multiPoint.get(i); - visitPoint(point.lon(), point.lat()); + visitPoint(point.getLon(), point.getLat()); } sb.append(RPAREN); return null; @@ -146,7 +146,7 @@ public Void visit(Point point) { sb.append(EMPTY); } else { sb.append(LPAREN); - visitPoint(point.lon(), point.lat()); + visitPoint(point.getLon(), point.getLat()); sb.append(RPAREN); } return null; diff --git a/server/build.gradle b/server/build.gradle index a3197acde4ad8..5e2ae5939dad0 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -77,7 +77,8 @@ dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" compile "org.elasticsearch:elasticsearch-secure-sm:${version}" compile "org.elasticsearch:elasticsearch-x-content:${version}" - + compile "org.elasticsearch:elasticsearch-geo:${version}" + compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java index 5022f66550c7f..aefd189edaf88 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java @@ -258,7 +258,7 @@ 
CoordinateNode validate(CoordinateNode coordinates, boolean coerce) { }, GEOMETRYCOLLECTION("geometrycollection") { @Override - public ShapeBuilder getBuilder(CoordinateNode coordinates, DistanceUnit.Distance radius, + public ShapeBuilder getBuilder(CoordinateNode coordinates, DistanceUnit.Distance radius, Orientation orientation, boolean coerce) { // noop, handled in parser return null; @@ -298,7 +298,7 @@ public static GeoShapeType forName(String geoshapename) { throw new IllegalArgumentException("unknown geo_shape ["+geoshapename+"]"); } - public abstract ShapeBuilder getBuilder(CoordinateNode coordinates, DistanceUnit.Distance radius, + public abstract ShapeBuilder getBuilder(CoordinateNode coordinates, DistanceUnit.Distance radius, ShapeBuilder.Orientation orientation, boolean coerce); abstract CoordinateNode validate(CoordinateNode coordinates, boolean coerce); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index 5e2b6cc1c4453..ec4e01fa8c87e 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -34,7 +34,7 @@ import java.io.IOException; import java.util.Objects; -public class CircleBuilder extends ShapeBuilder { +public class CircleBuilder extends ShapeBuilder { public static final ParseField FIELD_RADIUS = new ParseField("radius"); public static final GeoShapeType TYPE = GeoShapeType.CIRCLE; @@ -164,7 +164,7 @@ public Circle buildS4J() { } @Override - public Object buildLucene() { + public org.elasticsearch.geo.geometry.Circle buildGeometry() { throw new UnsupportedOperationException("CIRCLE geometry is not supported"); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 5f69f4ad44dba..09fa2f7da5544 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -32,7 +32,7 @@ import java.io.IOException; import java.util.Objects; -public class EnvelopeBuilder extends ShapeBuilder { +public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; @@ -113,8 +113,8 @@ public Rectangle buildS4J() { } @Override - public org.apache.lucene.geo.Rectangle buildLucene() { - return new org.apache.lucene.geo.Rectangle(bottomRight.y, topLeft.y, topLeft.x, bottomRight.x); + public org.elasticsearch.geo.geometry.Rectangle buildGeometry() { + return new org.elasticsearch.geo.geometry.Rectangle(bottomRight.y, topLeft.y, topLeft.x, bottomRight.x); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index fdf7073bd7454..fb3ff6203ed45 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -31,11 +31,11 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Objects; -public class GeometryCollectionBuilder extends ShapeBuilder { +public class GeometryCollectionBuilder extends ShapeBuilder, 
GeometryCollectionBuilder> { public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION; @@ -185,19 +185,14 @@ public Shape buildS4J() { } @Override - public Object buildLucene() { - List shapes = new ArrayList<>(this.shapes.size()); + public org.elasticsearch.geo.geometry.GeometryCollection buildGeometry() { + List shapes = new ArrayList<>(this.shapes.size()); for (ShapeBuilder shape : this.shapes) { - Object o = shape.buildLucene(); - if (o.getClass().isArray()) { - shapes.addAll(Arrays.asList((Object[])o)); - } else { - shapes.add(o); - } + shapes.add(shape.buildGeometry()); } - return shapes.toArray(new Object[shapes.size()]); + return new org.elasticsearch.geo.geometry.GeometryCollection<>(shapes); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index 07edb241cd76c..8e1e9d7a993b2 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -19,16 +19,16 @@ package org.elasticsearch.common.geo.builders; -import org.apache.lucene.geo.Line; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.GeometryFactory; -import org.locationtech.jts.geom.LineString; - import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.MultiLine; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LineString; import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; @@ -39,7 +39,7 @@ import static org.elasticsearch.common.geo.GeoUtils.normalizeLat; import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; -public class LineStringBuilder extends ShapeBuilder { +public class LineStringBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; /** @@ -125,15 +125,15 @@ public JtsGeometry buildS4J() { } @Override - public Object buildLucene() { + public org.elasticsearch.geo.geometry.Geometry buildGeometry() { // decompose linestrings crossing dateline into array of Lines Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[this.coordinates.size()]); if (wrapdateline) { - ArrayList linestrings = decomposeLucene(coordinates, new ArrayList<>()); + List linestrings = decomposeGeometry(coordinates, new ArrayList<>()); if (linestrings.size() == 1) { return linestrings.get(0); } else { - return linestrings.toArray(new Line[linestrings.size()]); + return new MultiLine(linestrings); } } return new Line(Arrays.stream(coordinates).mapToDouble(i->normalizeLat(i.y)).toArray(), @@ -149,7 +149,7 @@ static ArrayList decomposeS4J(GeometryFactory factory, Coordinate[] return strings; } - static ArrayList decomposeLucene(Coordinate[] coordinates, ArrayList lines) { + static List decomposeGeometry(Coordinate[] coordinates, List lines) { for (Coordinate[] part : decompose(+DATELINE, coordinates)) { for (Coordinate[] line : decompose(-DATELINE, part)) { lines.add(new Line(Arrays.stream(line).mapToDouble(i->normalizeLat(i.y)).toArray(), diff 
--git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 9902744fc3b2c..a283cda874528 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -19,25 +19,25 @@ package org.elasticsearch.common.geo.builders; -import org.apache.lucene.geo.Line; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.LineString; - import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.geo.geometry.MultiLine; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import java.util.Objects; -public class MultiLineStringBuilder extends ShapeBuilder { +public class MultiLineStringBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; @@ -150,24 +150,24 @@ public JtsGeometry buildS4J() { } @Override - public Object buildLucene() { + public org.elasticsearch.geo.geometry.Geometry buildGeometry() { if (wrapdateline) { - ArrayList parts = new ArrayList<>(); + List parts = new ArrayList<>(); for (LineStringBuilder line : lines) { - LineStringBuilder.decomposeLucene(line.coordinates(false), parts); + LineStringBuilder.decomposeGeometry(line.coordinates(false), parts); } if (parts.size() == 1) { return parts.get(0); } - return parts.toArray(new Line[parts.size()]); + return new MultiLine(parts); } - Line[] linestrings = new Line[lines.size()]; + List linestrings = new ArrayList<>(lines.size()); for (int i = 0; i < lines.size(); ++i) { LineStringBuilder lsb = lines.get(i); - linestrings[i] = new Line(lsb.coordinates.stream().mapToDouble(c->c.y).toArray(), - lsb.coordinates.stream().mapToDouble(c->c.x).toArray()); + linestrings.add(new org.elasticsearch.geo.geometry.Line(lsb.coordinates.stream().mapToDouble(c->c.y).toArray(), + lsb.coordinates.stream().mapToDouble(c->c.x).toArray())); } - return linestrings; + return new MultiLine(linestrings); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index b559bb581179e..c92d67e8291ea 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -24,14 +24,16 @@ import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.geo.geometry.MultiPoint; import org.locationtech.jts.geom.Coordinate; import org.locationtech.spatial4j.shape.Point; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; -public class 
MultiPointBuilder extends ShapeBuilder, MultiPointBuilder> { +public class MultiPointBuilder extends ShapeBuilder, MultiPoint, MultiPointBuilder> { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; @@ -74,14 +76,9 @@ public XShapeCollection buildS4J() { } @Override - public double[][] buildLucene() { - double[][] points = new double[coordinates.size()][]; - Coordinate coord; - for (int i = 0; i < coordinates.size(); ++i) { - coord = coordinates.get(i); - points[i] = new double[] {coord.x, coord.y}; - } - return points; + public MultiPoint buildGeometry() { + return new MultiPoint(coordinates.stream().map(coord -> new org.elasticsearch.geo.geometry.Point(coord.y, coord.x)) + .collect(Collectors.toList())); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index 95c2bbc6275a6..be0741306c097 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -26,17 +26,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.geo.geometry.MultiPolygon; import org.locationtech.jts.geom.Coordinate; import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Objects; -public class MultiPolygonBuilder extends ShapeBuilder { +public class MultiPolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; @@ -185,31 +185,20 @@ public Shape buildS4J() { //note: ShapeCollection is probably faster than a Multi* geom. 
} + @SuppressWarnings({"unchecked"}) @Override - public Object buildLucene() { - List shapes = new ArrayList<>(this.polygons.size()); + public MultiPolygon buildGeometry() { + List shapes = new ArrayList<>(this.polygons.size()); Object poly; - if (wrapdateline) { - for (PolygonBuilder polygon : this.polygons) { - poly = polygon.buildLucene(); - if (poly instanceof org.apache.lucene.geo.Polygon[]) { - shapes.addAll(Arrays.asList((org.apache.lucene.geo.Polygon[])poly)); - } else { - shapes.add((org.apache.lucene.geo.Polygon)poly); - } - } - } else { - for (int i = 0; i < this.polygons.size(); ++i) { - PolygonBuilder pb = this.polygons.get(i); - poly = pb.buildLucene(); - if (poly instanceof org.apache.lucene.geo.Polygon[]) { - shapes.addAll(Arrays.asList((org.apache.lucene.geo.Polygon[])poly)); - } else { - shapes.add((org.apache.lucene.geo.Polygon)poly); - } + for (PolygonBuilder polygon : this.polygons) { + poly = polygon.buildGeometry(); + if (poly instanceof List) { + shapes.addAll((List) poly); + } else { + shapes.add((org.elasticsearch.geo.geometry.Polygon)poly); } } - return shapes.stream().toArray(org.apache.lucene.geo.Polygon[]::new); + return new MultiPolygon(shapes); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index c13eca936e492..6a4fcd064c5da 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -19,18 +19,16 @@ package org.elasticsearch.common.geo.builders; -import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.jts.geom.Coordinate; - import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Point; import java.io.IOException; -public class PointBuilder extends ShapeBuilder { +public class PointBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POINT; /** @@ -90,8 +88,8 @@ public Point buildS4J() { } @Override - public GeoPoint buildLucene() { - return new GeoPoint(coordinates.get(0).y, coordinates.get(0).x); + public org.elasticsearch.geo.geometry.Point buildGeometry() { + return new org.elasticsearch.geo.geometry.Point(coordinates.get(0).y, coordinates.get(0).x); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index ac19642949c86..e4751de04bfe3 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -38,6 +38,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -55,7 +56,7 @@ * Methods to wrap polygons at the dateline and building shapes from the data held by the * builder. 
*/ -public class PolygonBuilder extends ShapeBuilder { +public class PolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POLYGON; @@ -233,14 +234,14 @@ public JtsGeometry buildS4J() { } @Override - public Object buildLucene() { + public org.elasticsearch.geo.geometry.Geometry buildGeometry() { if (wrapdateline) { Coordinate[][][] polygons = coordinates(); return polygons.length == 1 - ? polygonLucene(polygons[0]) - : multipolygonLucene(polygons); + ? polygonGeometry(polygons[0]) + : multipolygon(polygons); } - return toPolygonLucene(); + return toPolygonGeometry(); } protected XContentBuilder coordinatesArray(XContentBuilder builder, Params params) throws IOException { @@ -288,17 +289,19 @@ protected Polygon toPolygonS4J(GeometryFactory factory) { return factory.createPolygon(shell, holes); } - public Object toPolygonLucene() { - final org.apache.lucene.geo.Polygon[] holes = new org.apache.lucene.geo.Polygon[this.holes.size()]; - for (int i = 0; i < holes.length; ++i) { - holes[i] = linearRing(this.holes.get(i).coordinates); + public org.elasticsearch.geo.geometry.Polygon toPolygonGeometry() { + final List holes = new ArrayList<>(this.holes.size()); + for (int i = 0; i < this.holes.size(); ++i) { + holes.add(linearRing(this.holes.get(i).coordinates)); } - return new org.apache.lucene.geo.Polygon(this.shell.coordinates.stream().mapToDouble(i -> normalizeLat(i.y)).toArray(), - this.shell.coordinates.stream().mapToDouble(i -> normalizeLon(i.x)).toArray(), holes); + return new org.elasticsearch.geo.geometry.Polygon( + new org.elasticsearch.geo.geometry.LinearRing( + this.shell.coordinates.stream().mapToDouble(i -> normalizeLat(i.y)).toArray(), + this.shell.coordinates.stream().mapToDouble(i -> normalizeLon(i.x)).toArray()), holes); } - protected static org.apache.lucene.geo.Polygon linearRing(List coordinates) { - return new org.apache.lucene.geo.Polygon(coordinates.stream().mapToDouble(i -> normalizeLat(i.y)).toArray(), + protected static org.elasticsearch.geo.geometry.LinearRing linearRing(List coordinates) { + return new org.elasticsearch.geo.geometry.LinearRing(coordinates.stream().mapToDouble(i -> normalizeLat(i.y)).toArray(), coordinates.stream().mapToDouble(i -> normalizeLon(i.x)).toArray()); } @@ -335,13 +338,13 @@ protected static Polygon polygonS4J(GeometryFactory factory, Coordinate[][] poly return factory.createPolygon(shell, holes); } - protected static org.apache.lucene.geo.Polygon polygonLucene(Coordinate[][] polygon) { - org.apache.lucene.geo.Polygon[] holes; + protected static org.elasticsearch.geo.geometry.Polygon polygonGeometry(Coordinate[][] polygon) { + List holes; Coordinate[] shell = polygon[0]; if (polygon.length > 1) { - holes = new org.apache.lucene.geo.Polygon[polygon.length - 1]; - for (int i = 0; i < holes.length; ++i) { - Coordinate[] coords = polygon[i+1]; + holes = new ArrayList<>(polygon.length - 1); + for (int i = 1; i < polygon.length; ++i) { + Coordinate[] coords = polygon[i]; //We do not have holes on the dateline as they get eliminated //when breaking the polygon around it. 
double[] x = new double[coords.length]; @@ -350,10 +353,10 @@ protected static org.apache.lucene.geo.Polygon polygonLucene(Coordinate[][] poly x[c] = normalizeLon(coords[c].x); y[c] = normalizeLat(coords[c].y); } - holes[i] = new org.apache.lucene.geo.Polygon(y, x); + holes.add(new org.elasticsearch.geo.geometry.LinearRing(y, x)); } } else { - holes = new org.apache.lucene.geo.Polygon[0]; + holes = Collections.emptyList(); } double[] x = new double[shell.length]; @@ -365,7 +368,7 @@ protected static org.apache.lucene.geo.Polygon polygonLucene(Coordinate[][] poly y[i] = normalizeLat(shell[i].y); } - return new org.apache.lucene.geo.Polygon(y, x, holes); + return new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing(y, x), holes); } /** @@ -386,12 +389,12 @@ protected static MultiPolygon multipolygonS4J(GeometryFactory factory, Coordinat return factory.createMultiPolygon(polygonSet); } - protected static org.apache.lucene.geo.Polygon[] multipolygonLucene(Coordinate[][][] polygons) { - org.apache.lucene.geo.Polygon[] polygonSet = new org.apache.lucene.geo.Polygon[polygons.length]; - for (int i = 0; i < polygonSet.length; ++i) { - polygonSet[i] = polygonLucene(polygons[i]); + protected static org.elasticsearch.geo.geometry.MultiPolygon multipolygon(Coordinate[][][] polygons) { + List polygonSet = new ArrayList<>(polygons.length); + for (int i = 0; i < polygons.length; ++i) { + polygonSet.add(polygonGeometry(polygons[i])); } - return polygonSet; + return new org.elasticsearch.geo.geometry.MultiPolygon(polygonSet); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 365dddb70eab4..d6ba295be67e3 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -52,7 +52,8 @@ /** * Basic class for building GeoJSON shapes like Polygons, Linestrings, etc */ -public abstract class ShapeBuilder> implements NamedWriteable, ToXContentObject { +public abstract class ShapeBuilder> implements NamedWriteable, ToXContentObject { protected static final Logger LOGGER = LogManager.getLogger(ShapeBuilder.class); @@ -218,7 +219,7 @@ protected JtsGeometry jtsGeometry(Geometry geom) { * * @return GeoPoint, double[][], Line, Line[], Polygon, Polygon[], Rectangle, Object[] */ - public abstract Object buildLucene(); + public abstract G buildGeometry(); protected static Coordinate shift(Coordinate coordinate, double dateline) { if (dateline == 0) { @@ -484,7 +485,7 @@ public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ShapeBuilder)) return false; - ShapeBuilder that = (ShapeBuilder) o; + ShapeBuilder that = (ShapeBuilder) o; return Objects.equals(coordinates, that.coordinates); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 486ff0bbe4a70..72b3e68fa025e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -22,13 +22,20 @@ import org.apache.lucene.document.LatLonShape; import org.apache.lucene.geo.Line; import org.apache.lucene.geo.Polygon; -import org.apache.lucene.geo.Rectangle; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Explicit; 
-import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; import java.io.IOException; import java.util.ArrayList; @@ -107,7 +114,7 @@ public void parse(ParseContext context) throws IOException { if (shapeBuilder == null) { return; } - shape = shapeBuilder.buildLucene(); + shape = shapeBuilder.buildGeometry(); } indexShape(context, shape); } catch (Exception e) { @@ -120,45 +127,95 @@ public void parse(ParseContext context) throws IOException { } private void indexShape(ParseContext context, Object luceneShape) { - if (luceneShape instanceof GeoPoint) { - GeoPoint pt = (GeoPoint) luceneShape; - indexFields(context, LatLonShape.createIndexableFields(name(), pt.lat(), pt.lon())); - } else if (luceneShape instanceof double[]) { - double[] pt = (double[]) luceneShape; - indexFields(context, LatLonShape.createIndexableFields(name(), pt[1], pt[0])); - } else if (luceneShape instanceof Line) { - indexFields(context, LatLonShape.createIndexableFields(name(), (Line)luceneShape)); - } else if (luceneShape instanceof Polygon) { - indexFields(context, LatLonShape.createIndexableFields(name(), (Polygon) luceneShape)); - } else if (luceneShape instanceof double[][]) { - double[][] pts = (double[][])luceneShape; - for (int i = 0; i < pts.length; ++i) { - indexFields(context, LatLonShape.createIndexableFields(name(), pts[i][1], pts[i][0])); + if (luceneShape instanceof Geometry) { + ((Geometry) luceneShape).visit(new LuceneGeometryIndexer(context)); + } else { + throw new IllegalArgumentException("invalid shape type found [" + luceneShape.getClass() + "] while indexing shape"); + } + } + + private class LuceneGeometryIndexer implements GeometryVisitor { + private ParseContext context; + + private LuceneGeometryIndexer(ParseContext context) { + this.context = context; + } + + @Override + public Void visit(Circle circle) { + throw new IllegalArgumentException("invalid shape type found [Circle] while indexing shape"); + } + + @Override + public Void visit(GeometryCollection collection) { + for (Geometry geometry : collection) { + geometry.visit(this); } - } else if (luceneShape instanceof Line[]) { - Line[] lines = (Line[]) luceneShape; - for (int i = 0; i < lines.length; ++i) { - indexFields(context, LatLonShape.createIndexableFields(name(), lines[i])); + return null; + } + + @Override + public Void visit(org.elasticsearch.geo.geometry.Line line) { + indexFields(context, LatLonShape.createIndexableFields(name(), new Line(line.getLats(), line.getLons()))); + return null; + } + + @Override + public Void visit(LinearRing ring) { + throw new IllegalArgumentException("invalid shape type found [LinearRing] while indexing shape"); + } + + @Override + public Void visit(MultiLine multiLine) { + for (org.elasticsearch.geo.geometry.Line line : multiLine) { + visit(line); } - } else if (luceneShape instanceof Polygon[]) { - Polygon[] polys = (Polygon[]) luceneShape; - for (int i = 0; i < polys.length; ++i) { - 
indexFields(context, LatLonShape.createIndexableFields(name(), polys[i])); + return null; + } + + @Override + public Void visit(MultiPoint multiPoint) { + for(Point point : multiPoint) { + visit(point); } - } else if (luceneShape instanceof Rectangle) { - // index rectangle as a polygon - Rectangle r = (Rectangle) luceneShape; - Polygon p = new Polygon(new double[]{r.minLat, r.minLat, r.maxLat, r.maxLat, r.minLat}, - new double[]{r.minLon, r.maxLon, r.maxLon, r.minLon, r.minLon}); - indexFields(context, LatLonShape.createIndexableFields(name(), p)); - } else if (luceneShape instanceof Object[]) { - // recurse to index geometry collection - for (Object o : (Object[])luceneShape) { - indexShape(context, o); + return null; + } + + @Override + public Void visit(MultiPolygon multiPolygon) { + for(org.elasticsearch.geo.geometry.Polygon polygon : multiPolygon) { + visit(polygon); } - } else { - throw new IllegalArgumentException("invalid shape type found [" + luceneShape.getClass() + "] while indexing shape"); + return null; + } + + @Override + public Void visit(Point point) { + indexFields(context, LatLonShape.createIndexableFields(name(), point.getLat(), point.getLon())); + return null; + } + + @Override + public Void visit(org.elasticsearch.geo.geometry.Polygon polygon) { + indexFields(context, LatLonShape.createIndexableFields(name(), toLucenePolygon(polygon))); + return null; + } + + @Override + public Void visit(org.elasticsearch.geo.geometry.Rectangle r) { + Polygon p = new Polygon(new double[]{r.getMinLat(), r.getMinLat(), r.getMaxLat(), r.getMaxLat(), r.getMinLat()}, + new double[]{r.getMinLon(), r.getMaxLon(), r.getMaxLon(), r.getMinLon(), r.getMinLon()}); + indexFields(context, LatLonShape.createIndexableFields(name(), p)); + return null; + } + } + + public static Polygon toLucenePolygon(org.elasticsearch.geo.geometry.Polygon polygon) { + Polygon[] holes = new Polygon[polygon.getNumberOfHoles()]; + for(int i = 0; i() { + @Override + public Query visit(Circle circle) { + throw new QueryShardException(context, "Field [" + fieldName + "] found and unknown shape Circle"); + } + + @Override + public Query visit(GeometryCollection collection) { + BooleanQuery.Builder bqb = new BooleanQuery.Builder(); + visit(bqb, collection); + return bqb.build(); + } + + private void visit(BooleanQuery.Builder bqb, GeometryCollection collection) { + for (Geometry shape : collection) { + if (shape instanceof MultiPoint) { + // Flatten multipoints + visit(bqb, (GeometryCollection) shape); + } else { + bqb.add(shape.visit(this), BooleanClause.Occur.SHOULD); + } } } - return LatLonShape.newBoxQuery(fieldName, relation.getLuceneRelation(), pt[1], pt[1], pt[0], pt[0]); - } else if (queryShape instanceof Object[]) { - geoQuery = createGeometryCollectionQuery(context, (Object[]) queryShape); - } else { - throw new QueryShardException(context, "Field [" + fieldName + "] found and unknown shape"); - } - return geoQuery; - } - private Query createGeometryCollectionQuery(QueryShardContext context, Object... 
shapes) { - BooleanQuery.Builder bqb = new BooleanQuery.Builder(); - for (Object shape : shapes) { - bqb.add(getVectorQueryFromShape(context, shape), BooleanClause.Occur.SHOULD); - } - return bqb.build(); + @Override + public Query visit(org.elasticsearch.geo.geometry.Line line) { + return LatLonShape.newLineQuery(fieldName(), relation.getLuceneRelation(), new Line(line.getLats(), line.getLons())); + } + + @Override + public Query visit(LinearRing ring) { + throw new QueryShardException(context, "Field [" + fieldName + "] found and unsupported shape LinearRing"); + } + + @Override + public Query visit(MultiLine multiLine) { + Line[] lines = new Line[multiLine.size()]; + for (int i=0; ii.y).toArray(), - Arrays.stream(coordinates).mapToDouble(i->i.x).toArray()); + Arrays.stream(coordinates).mapToDouble(i->i.x).toArray() + )); assertGeometryEquals(p, polygonGeoJson, false); } @@ -502,7 +508,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 2: ccw poly crossing dateline @@ -527,7 +533,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 3: cw poly not crossing dateline @@ -552,7 +558,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 4: cw poly crossing dateline @@ -577,7 +583,7 @@ public void testParseOGCPolygonWithoutHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } } @@ -610,7 +616,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 2: ccw poly crossing dateline @@ -641,7 +647,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 3: cw poly not crossing dateline @@ -672,7 +678,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = 
createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 4: cw poly crossing dateline @@ -703,7 +709,7 @@ public void testParseOGCPolygonWithHoles() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } } @@ -855,12 +861,12 @@ public void testParsePolygonWithHole() throws IOException { Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertGeometryEquals(jtsGeom(expected), polygonGeoJson, true); - org.apache.lucene.geo.Polygon hole = - new org.apache.lucene.geo.Polygon( + org.elasticsearch.geo.geometry.LinearRing hole = + new org.elasticsearch.geo.geometry.LinearRing( new double[] {0.8d, 0.2d, 0.2d, 0.8d, 0.8d}, new double[] {100.8d, 100.8d, 100.2d, 100.2d, 100.8d}); - org.apache.lucene.geo.Polygon p = - new org.apache.lucene.geo.Polygon( - new double[] {0d, 0d, 1d, 1d, 0d}, new double[] {100d, 101d, 101d, 100d, 100d}, hole); + org.elasticsearch.geo.geometry.Polygon p = + new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {0d, 0d, 1d, 1d, 0d}, new double[] {100d, 101d, 101d, 100d, 100d}), Collections.singletonList(hole)); assertGeometryEquals(p, polygonGeoJson, false); } @@ -902,9 +908,9 @@ public void testParseMultiPoint() throws IOException { SPATIAL_CONTEXT.makePoint(101, 1.0)); assertGeometryEquals(expected, multiPointGeoJson, true); - assertGeometryEquals(new double[][]{ - new double[] {100d, 0d}, - new double[] {101d, 1d}}, multiPointGeoJson, false); + assertGeometryEquals(new MultiPoint(Arrays.asList( + new org.elasticsearch.geo.geometry.Point(0, 100), + new org.elasticsearch.geo.geometry.Point(1, 101))), multiPointGeoJson, false); } @Override @@ -975,16 +981,15 @@ public void testParseMultiPolygon() throws IOException { assertGeometryEquals(expected, multiPolygonGeoJson, true); - org.apache.lucene.geo.Polygon hole = - new org.apache.lucene.geo.Polygon( + org.elasticsearch.geo.geometry.LinearRing hole = new org.elasticsearch.geo.geometry.LinearRing( new double[] {0.8d, 0.2d, 0.2d, 0.8d, 0.8d}, new double[] {100.8d, 100.8d, 100.2d, 100.2d, 100.8d}); - org.apache.lucene.geo.Polygon[] polygons = new org.apache.lucene.geo.Polygon[] { - new org.apache.lucene.geo.Polygon( - new double[] {2d, 3d, 3d, 2d, 2d}, new double[] {103d, 103d, 102d, 102d, 103d}), - new org.apache.lucene.geo.Polygon( - new double[] {0d, 1d, 1d, 0d, 0d}, new double[] {101d, 101d, 100d, 100d, 101d}, hole) - }; + org.elasticsearch.geo.geometry.MultiPolygon polygons = new org.elasticsearch.geo.geometry.MultiPolygon(Arrays.asList( + new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {2d, 3d, 3d, 2d, 2d}, new double[] {103d, 103d, 102d, 102d, 103d})), + new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {0d, 1d, 1d, 0d, 0d}, new double[] {101d, 101d, 100d, 100d, 101d}), Collections.singletonList(hole)))); + assertGeometryEquals(polygons, multiPolygonGeoJson, false); // test #2: multipolygon; one polygon with one hole @@ -1034,14 +1039,13 @@ public void 
testParseMultiPolygon() throws IOException { assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson, true); - org.apache.lucene.geo.Polygon luceneHole = - new org.apache.lucene.geo.Polygon( + org.elasticsearch.geo.geometry.LinearRing luceneHole = + new org.elasticsearch.geo.geometry.LinearRing( new double[] {0.8d, 0.2d, 0.2d, 0.8d, 0.8d}, new double[] {100.8d, 100.8d, 100.2d, 100.2d, 100.8d}); - org.apache.lucene.geo.Polygon[] lucenePolygons = new org.apache.lucene.geo.Polygon[] { - new org.apache.lucene.geo.Polygon( - new double[] {0d, 0d, 1d, 1d, 0d}, new double[] {100d, 101d, 101d, 100d, 100d}, luceneHole) - }; + org.elasticsearch.geo.geometry.MultiPolygon lucenePolygons = new org.elasticsearch.geo.geometry.MultiPolygon( + Collections.singletonList(new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {0d, 0d, 1d, 1d, 0d}, new double[] {100d, 101d, 101d, 100d, 100d}), Collections.singletonList(luceneHole)))); assertGeometryEquals(lucenePolygons, multiPolygonGeoJson, false); } @@ -1117,19 +1121,21 @@ public void testParseGeometryCollection() throws IOException { //equals returns true only if geometries are in the same order assertGeometryEquals(shapeCollection(expected), geometryCollectionGeoJson, true); - Object[] luceneExpected = new Object[] { - new Line(new double[] {0d, 1d}, new double[] {100d, 101d}), - new GeoPoint(2d, 102d), - new org.apache.lucene.geo.Polygon( - new double[] {-12.142857142857142d, 12.142857142857142d, 15d, 0d, -15d, -12.142857142857142d}, - new double[] {180d, 180d, 176d, 172d, 176d, 180d} - ), - new org.apache.lucene.geo.Polygon( - new double[] {12.142857142857142d, -12.142857142857142d, -10d, 10d, 12.142857142857142d}, - new double[] {-180d, -180d, -177d, -177d, -180d} - ) - }; - assertGeometryEquals(luceneExpected, geometryCollectionGeoJson, false); + GeometryCollection geometryExpected = new GeometryCollection<> (Arrays.asList( + new org.elasticsearch.geo.geometry.Line(new double[] {0d, 1d}, new double[] {100d, 101d}), + new org.elasticsearch.geo.geometry.Point(2d, 102d), + new org.elasticsearch.geo.geometry.MultiPolygon(Arrays.asList( + new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {-12.142857142857142d, 12.142857142857142d, 15d, 0d, -15d, -12.142857142857142d}, + new double[] {180d, 180d, 176d, 172d, 176d, 180d} + )), + new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {12.142857142857142d, -12.142857142857142d, -10d, 10d, 12.142857142857142d}, + new double[] {-180d, -180d, -177d, -177d, -180d} + )) + )) + )); + assertGeometryEquals(geometryExpected, geometryCollectionGeoJson, false); } public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException { @@ -1151,7 +1157,7 @@ public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() t Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0)); assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson, true); - GeoPoint expectedPt = new GeoPoint(0, 100); + org.elasticsearch.geo.geometry.Point expectedPt = new org.elasticsearch.geo.geometry.Point(0, 100); assertGeometryEquals(expectedPt, pointGeoJson, false); } @@ -1187,7 +1193,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), 
false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 2: valid ccw (right handed system) poly not crossing dateline (with 'ccw' field) @@ -1221,7 +1227,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 3: valid ccw (right handed system) poly not crossing dateline (with 'counterclockwise' field) @@ -1255,7 +1261,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 4: valid cw (left handed system) poly crossing dateline (with 'left' field) @@ -1289,7 +1295,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 5: valid cw multipoly (left handed system) poly crossing dateline (with 'cw' field) @@ -1323,7 +1329,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } // test 6: valid cw multipoly (left handed system) poly crossing dateline (with 'clockwise' field) @@ -1357,7 +1363,7 @@ public void testParseOrientationOption() throws IOException { try (XContentParser parser = createParser(polygonGeoJson)) { parser.nextToken(); - ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildLucene(), false); + ElasticsearchGeoAssertions.assertMultiPolygon(ShapeParser.parse(parser).buildGeometry(), false); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 94c96e00d9236..f1d9b0f161570 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.geo; import org.apache.lucene.geo.GeoTestUtil; -import org.apache.lucene.geo.Line; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; @@ -41,6 +40,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper; @@ -61,6 +63,7 @@ 
import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.elasticsearch.common.geo.builders.ShapeBuilder.SPATIAL_CONTEXT; @@ -72,7 +75,7 @@ */ public class GeoWKTShapeParserTests extends BaseGeoParsingTestCase { - private static XContentBuilder toWKTContent(ShapeBuilder builder, boolean generateMalformed) + private static XContentBuilder toWKTContent(ShapeBuilder builder, boolean generateMalformed) throws IOException { String wkt = builder.toWKT(); if (generateMalformed) { @@ -87,12 +90,12 @@ private static XContentBuilder toWKTContent(ShapeBuilder builder, boolean return XContentFactory.jsonBuilder().value(wkt); } - private void assertExpected(Object expected, ShapeBuilder builder, boolean useJTS) throws IOException { + private void assertExpected(Object expected, ShapeBuilder builder, boolean useJTS) throws IOException { XContentBuilder xContentBuilder = toWKTContent(builder, false); assertGeometryEquals(expected, xContentBuilder, useJTS); } - private void assertMalformed(ShapeBuilder builder) throws IOException { + private void assertMalformed(ShapeBuilder builder) throws IOException { XContentBuilder xContentBuilder = toWKTContent(builder, true); assertValidException(xContentBuilder, ElasticsearchParseException.class); } @@ -103,7 +106,7 @@ public void testParsePoint() throws IOException { Coordinate c = new Coordinate(p.lon(), p.lat()); Point expected = GEOMETRY_FACTORY.createPoint(c); assertExpected(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c), true); - assertExpected(new GeoPoint(p.lat(), p.lon()), new PointBuilder().coordinate(c), false); + assertExpected(new org.elasticsearch.geo.geometry.Point(p.lat(), p.lon()), new PointBuilder().coordinate(c), false); assertMalformed(new PointBuilder().coordinate(c)); } @@ -123,13 +126,12 @@ public void testParseMultiPoint() throws IOException { ShapeCollection expected = shapeCollection(shapes); assertExpected(expected, new MultiPointBuilder(coordinates), true); - double[][] luceneShapes = new double[numPoints][2]; + List points = new ArrayList<>(numPoints); for (int i = 0; i < numPoints; ++i) { Coordinate c = coordinates.get(i); - luceneShapes[i][0] = c.x; - luceneShapes[i][1] = c.y; + points.add(new org.elasticsearch.geo.geometry.Point(c.y, c.x)); } - assertExpected(luceneShapes, new MultiPointBuilder(coordinates), false); + assertExpected(new MultiPoint(points), new MultiPointBuilder(coordinates), false); assertMalformed(new MultiPointBuilder(coordinates)); } @@ -175,13 +177,13 @@ public void testParseMultiLineString() throws IOException { lineStrings.toArray(new LineString[lineStrings.size()])); assertExpected(jtsGeom(expected), builder, true); - Line[] lines = new Line[lineStrings.size()]; + List lines = new ArrayList<>(lineStrings.size()); for (int j = 0; j < lineStrings.size(); ++j) { Coordinate[] c = lineStrings.get(j).getCoordinates(); - lines[j] = new Line(Arrays.stream(c).mapToDouble(i->i.y).toArray(), - Arrays.stream(c).mapToDouble(i->i.x).toArray()); + lines.add(new Line(Arrays.stream(c).mapToDouble(i->i.y).toArray(), + Arrays.stream(c).mapToDouble(i->i.x).toArray())); } - assertExpected(lines, builder, false); + assertExpected(new MultiLine(lines), builder, false); assertMalformed(builder); } @@ -245,12 +247,12 @@ public void testParsePolygonWithHole() throws IOException { Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertExpected(jtsGeom(expected), polygonWithHole, true); - 
org.apache.lucene.geo.Polygon hole = - new org.apache.lucene.geo.Polygon( + org.elasticsearch.geo.geometry.LinearRing hole = + new org.elasticsearch.geo.geometry.LinearRing( new double[] {0.8d, 0.8d, 0.2d, 0.2d, 0.8d}, new double[] {100.2d, 100.8d, 100.8d, 100.2d, 100.2d}); - org.apache.lucene.geo.Polygon p = - new org.apache.lucene.geo.Polygon( - new double[] {0d, 1d, 1d, 0d, 0d}, new double[] {101d, 101d, 100d, 100d, 101d}, hole); + org.elasticsearch.geo.geometry.Polygon p = + new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing( + new double[] {0d, 1d, 1d, 0d, 0d}, new double[] {101d, 101d, 100d, 100d, 101d}), Collections.singletonList(hole)); assertExpected(p, polygonWithHole, false); assertMalformed(polygonWithHole); } @@ -357,7 +359,7 @@ public void testParsePolyWithStoredZ() throws IOException { final LegacyGeoShapeFieldMapper mapperBuilder = (LegacyGeoShapeFieldMapper)(new LegacyGeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext)); - ShapeBuilder shapeBuilder = ShapeParser.parse(parser, mapperBuilder); + ShapeBuilder shapeBuilder = ShapeParser.parse(parser, mapperBuilder); assertEquals(shapeBuilder.numDimensions(), 3); } @@ -383,7 +385,7 @@ public void testParseOpenPolygon() throws IOException { final LegacyGeoShapeFieldMapper coercingMapperBuilder = (LegacyGeoShapeFieldMapper)(new LegacyGeoShapeFieldMapper.Builder("test").coerce(true).build(mockBuilderContext)); - ShapeBuilder shapeBuilder = ShapeParser.parse(parser, coercingMapperBuilder); + ShapeBuilder shapeBuilder = ShapeParser.parse(parser, coercingMapperBuilder); assertNotNull(shapeBuilder); assertEquals("polygon ((100.0 5.0, 100.0 10.0, 90.0 10.0, 90.0 5.0, 100.0 5.0))", shapeBuilder.toWKT()); } @@ -418,7 +420,7 @@ public void testParseEnvelope() throws IOException { Rectangle expected = SPATIAL_CONTEXT.makeRectangle(r.minLon, r.maxLon, r.minLat, r.maxLat); assertExpected(expected, builder, true); - assertExpected(r, builder, false); + assertExpected(new org.elasticsearch.geo.geometry.Rectangle(r.minLat, r.maxLat, r.minLon, r.maxLon), builder, false); assertMalformed(builder); } @@ -436,12 +438,12 @@ public void testParseGeometryCollection() throws IOException { if (randomBoolean()) { assertEquals(shapeCollection(expected).isEmpty(), builder.buildS4J().isEmpty()); } else { - assertEquals(shapeCollection(expected).isEmpty(), ((Object[])builder.buildLucene()).length == 0); + assertEquals(shapeCollection(expected).isEmpty(), builder.buildGeometry().size() == 0); } } else { GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(random()); assertExpected(gcb.buildS4J(), gcb, true); - assertExpected(gcb.buildLucene(), gcb, false); + assertExpected(gcb.buildGeometry(), gcb, false); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index 8b8bd2285ca3c..32f1b333c4ead 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -52,7 +52,7 @@ public void testNewPoint() { Point point = pb.buildS4J(); assertEquals(-100D, point.getX(), 0.0d); assertEquals(45D, point.getY(), 0.0d); - GeoPoint geoPoint = pb.buildLucene(); + org.elasticsearch.geo.geometry.Point geoPoint = pb.buildGeometry(); assertEquals(-100D, geoPoint.getLon(), 0.0d); assertEquals(45D, geoPoint.getLat(), 0.0d); } @@ -65,11 +65,11 @@ public void testNewRectangle() { 
assertEquals(45D, rectangle.getMaxX(), 0.0d); assertEquals(30D, rectangle.getMaxY(), 0.0d); - org.apache.lucene.geo.Rectangle luceneRectangle = eb.buildLucene(); - assertEquals(-45D, luceneRectangle.minLon, 0.0d); - assertEquals(-30D, luceneRectangle.minLat, 0.0d); - assertEquals(45D, luceneRectangle.maxLon, 0.0d); - assertEquals(30D, luceneRectangle.maxLat, 0.0d); + org.elasticsearch.geo.geometry.Rectangle luceneRectangle = eb.buildGeometry(); + assertEquals(-45D, luceneRectangle.getMinLon(), 0.0d); + assertEquals(-30D, luceneRectangle.getMinLat(), 0.0d); + assertEquals(45D, luceneRectangle.getMaxLon(), 0.0d); + assertEquals(30D, luceneRectangle.getMaxLat(), 0.0d); } public void testNewPolygon() { @@ -87,15 +87,15 @@ public void testNewPolygon() { assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30)); assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30)); - org.apache.lucene.geo.Polygon lucenePoly = (org.apache.lucene.geo.Polygon)(pb.toPolygonLucene()); - assertEquals(lucenePoly.getPolyLat(0), 30, 0d); - assertEquals(lucenePoly.getPolyLon(0), -45, 0d); - assertEquals(lucenePoly.getPolyLat(1), 30, 0d); - assertEquals(lucenePoly.getPolyLon(1), 45, 0d); - assertEquals(lucenePoly.getPolyLat(2), -30, 0d); - assertEquals(lucenePoly.getPolyLon(2), 45, 0d); - assertEquals(lucenePoly.getPolyLat(3), -30, 0d); - assertEquals(lucenePoly.getPolyLon(3), -45, 0d); + org.elasticsearch.geo.geometry.LinearRing polygon = pb.toPolygonGeometry().getPolygon(); + assertEquals(polygon.getLat(0), 30, 0d); + assertEquals(polygon.getLon(0), -45, 0d); + assertEquals(polygon.getLat(1), 30, 0d); + assertEquals(polygon.getLon(1), 45, 0d); + assertEquals(polygon.getLat(2), -30, 0d); + assertEquals(polygon.getLon(2), 45, 0d); + assertEquals(polygon.getLat(3), -30, 0d); + assertEquals(polygon.getLon(3), -45, 0d); } public void testNewPolygon_coordinate() { @@ -113,15 +113,15 @@ public void testNewPolygon_coordinate() { assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30)); assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30)); - org.apache.lucene.geo.Polygon lucenePoly = (org.apache.lucene.geo.Polygon)(pb.toPolygonLucene()); - assertEquals(lucenePoly.getPolyLat(0), 30, 0d); - assertEquals(lucenePoly.getPolyLon(0), -45, 0d); - assertEquals(lucenePoly.getPolyLat(1), 30, 0d); - assertEquals(lucenePoly.getPolyLon(1), 45, 0d); - assertEquals(lucenePoly.getPolyLat(2), -30, 0d); - assertEquals(lucenePoly.getPolyLon(2), 45, 0d); - assertEquals(lucenePoly.getPolyLat(3), -30, 0d); - assertEquals(lucenePoly.getPolyLon(3), -45, 0d); + org.elasticsearch.geo.geometry.LinearRing polygon = pb.toPolygonGeometry().getPolygon(); + assertEquals(polygon.getLat(0), 30, 0d); + assertEquals(polygon.getLon(0), -45, 0d); + assertEquals(polygon.getLat(1), 30, 0d); + assertEquals(polygon.getLon(1), 45, 0d); + assertEquals(polygon.getLat(2), -30, 0d); + assertEquals(polygon.getLon(2), 45, 0d); + assertEquals(polygon.getLat(3), -30, 0d); + assertEquals(polygon.getLon(3), -45, 0d); } public void testNewPolygon_coordinates() { @@ -137,15 +137,15 @@ public void testNewPolygon_coordinates() { assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30)); assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30)); - org.apache.lucene.geo.Polygon lucenePoly = (org.apache.lucene.geo.Polygon)(pb.toPolygonLucene()); - assertEquals(lucenePoly.getPolyLat(0), 30, 0d); - assertEquals(lucenePoly.getPolyLon(0), -45, 0d); - assertEquals(lucenePoly.getPolyLat(1), 30, 0d); - 
assertEquals(lucenePoly.getPolyLon(1), 45, 0d); - assertEquals(lucenePoly.getPolyLat(2), -30, 0d); - assertEquals(lucenePoly.getPolyLon(2), 45, 0d); - assertEquals(lucenePoly.getPolyLat(3), -30, 0d); - assertEquals(lucenePoly.getPolyLon(3), -45, 0d); + org.elasticsearch.geo.geometry.LinearRing polygon = pb.toPolygonGeometry().getPolygon(); + assertEquals(polygon.getLat(0), 30, 0d); + assertEquals(polygon.getLon(0), -45, 0d); + assertEquals(polygon.getLat(1), 30, 0d); + assertEquals(polygon.getLon(1), 45, 0d); + assertEquals(polygon.getLat(2), -30, 0d); + assertEquals(polygon.getLon(2), 45, 0d); + assertEquals(polygon.getLat(3), -30, 0d); + assertEquals(polygon.getLon(3), -45, 0d); } public void testLineStringBuilder() { @@ -161,7 +161,7 @@ public void testLineStringBuilder() { .coordinate(-110.0, 55.0)); lsb.buildS4J(); - lsb.buildLucene(); + lsb.buildGeometry(); // Building a linestring that needs to be wrapped lsb = new LineStringBuilder(new CoordinatesBuilder() @@ -175,7 +175,7 @@ public void testLineStringBuilder() { .coordinate(130.0, 60.0)); lsb.buildS4J(); - lsb.buildLucene(); + lsb.buildGeometry(); // Building a lineString on the dateline lsb = new LineStringBuilder(new CoordinatesBuilder() @@ -185,7 +185,7 @@ public void testLineStringBuilder() { .coordinate(-180.0, -80.0)); lsb.buildS4J(); - lsb.buildLucene(); + lsb.buildGeometry(); // Building a lineString on the dateline lsb = new LineStringBuilder(new CoordinatesBuilder() @@ -195,7 +195,7 @@ public void testLineStringBuilder() { .coordinate(180.0, -80.0)); lsb.buildS4J(); - lsb.buildLucene(); + lsb.buildGeometry(); } public void testMultiLineString() { @@ -215,7 +215,7 @@ public void testMultiLineString() { ) ); mlsb.buildS4J(); - mlsb.buildLucene(); + mlsb.buildGeometry(); // LineString that needs to be wrapped new MultiLineStringBuilder() @@ -235,7 +235,7 @@ public void testMultiLineString() { ); mlsb.buildS4J(); - mlsb.buildLucene(); + mlsb.buildGeometry(); } public void testPolygonSelfIntersection() { @@ -283,7 +283,7 @@ public void testPolygonWrapping() { .close()); assertMultiPolygon(pb.buildS4J(), true); - assertMultiPolygon(pb.buildLucene(), false); + assertMultiPolygon(pb.buildGeometry(), false); } public void testLineStringWrapping() { @@ -295,7 +295,7 @@ public void testLineStringWrapping() { .close()); assertMultiLineString(lsb.buildS4J(), true); - assertMultiLineString(lsb.buildLucene(), false); + assertMultiLineString(lsb.buildGeometry(), false); } public void testDatelineOGC() { @@ -339,7 +339,7 @@ public void testDatelineOGC() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); } public void testDateline() { @@ -383,7 +383,7 @@ public void testDateline() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); } public void testComplexShapeWithHole() { @@ -458,7 +458,7 @@ public void testComplexShapeWithHole() { ) ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + assertPolygon(builder.close().buildGeometry(), false); } public void testShapeWithHoleAtEdgeEndPoints() { @@ -480,7 +480,7 @@ public void testShapeWithHoleAtEdgeEndPoints() { .coordinate(4, 1) )); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + 
assertPolygon(builder.close().buildGeometry(), false); } public void testShapeWithPointOnDateline() { @@ -491,7 +491,7 @@ public void testShapeWithPointOnDateline() { .coordinate(180, 0) ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + assertPolygon(builder.close().buildGeometry(), false); } public void testShapeWithEdgeAlongDateline() { @@ -504,7 +504,7 @@ public void testShapeWithEdgeAlongDateline() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + assertPolygon(builder.close().buildGeometry(), false); // test case 2: test the negative side of the dateline builder = new PolygonBuilder(new CoordinatesBuilder() @@ -515,7 +515,7 @@ public void testShapeWithEdgeAlongDateline() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + assertPolygon(builder.close().buildGeometry(), false); } public void testShapeWithBoundaryHoles() { @@ -537,7 +537,7 @@ public void testShapeWithBoundaryHoles() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); // test case 2: test the negative side of the dateline builder = new PolygonBuilder( @@ -560,7 +560,7 @@ public void testShapeWithBoundaryHoles() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); } public void testShapeWithTangentialHole() { @@ -582,7 +582,7 @@ public void testShapeWithTangentialHole() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); } public void testShapeWithInvalidTangentialHole() { @@ -606,7 +606,7 @@ public void testShapeWithInvalidTangentialHole() { e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildLucene()); + e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } @@ -634,7 +634,7 @@ public void testBoundaryShapeWithTangentialHole() { .coordinate(172, 0) )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); } public void testBoundaryShapeWithInvalidTangentialHole() { @@ -657,7 +657,7 @@ public void testBoundaryShapeWithInvalidTangentialHole() { Exception e; e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildLucene()); + e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } @@ -673,7 +673,7 @@ public void testBoundaryShape() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + 
assertPolygon(builder.close().buildGeometry(), false); } public void testShapeWithAlternateOrientation() { @@ -686,7 +686,7 @@ public void testShapeWithAlternateOrientation() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildLucene(), false); + assertPolygon(builder.close().buildGeometry(), false); // cw: geo core will convert to ccw across the dateline builder = new PolygonBuilder(new CoordinatesBuilder() @@ -697,7 +697,7 @@ public void testShapeWithAlternateOrientation() { ); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildLucene(), false); + assertMultiPolygon(builder.close().buildGeometry(), false); } public void testInvalidShapeWithConsecutiveDuplicatePoints() { @@ -711,7 +711,7 @@ public void testInvalidShapeWithConsecutiveDuplicatePoints() { Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildLucene()); + e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index 20e159ded41e4..ffee74966530b 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -37,7 +37,7 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -public abstract class AbstractShapeBuilderTestCase> extends ESTestCase { +public abstract class AbstractShapeBuilderTestCase> extends ESTestCase { private static final int NUMBER_OF_TESTBUILDERS = 20; private static NamedWriteableRegistry namedWriteableRegistry; @@ -81,7 +81,7 @@ public void testFromXContent() throws IOException { XContentBuilder shuffled = shuffleXContent(builder); try (XContentParser shapeContentParser = createParser(shuffled)) { shapeContentParser.nextToken(); - ShapeBuilder parsedShape = ShapeParser.parse(shapeContentParser); + ShapeBuilder parsedShape = ShapeParser.parse(shapeContentParser); assertNotSame(testShape, parsedShape); assertEquals(testShape, parsedShape); assertEquals(testShape.hashCode(), parsedShape.hashCode()); diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java index b44ba6769d79f..55ca2d907f6ff 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java @@ -69,7 +69,7 @@ static GeometryCollectionBuilder mutate(GeometryCollectionBuilder original) thro GeometryCollectionBuilder mutation = copyShape(original); if (mutation.shapes.size() > 0) { int shapePosition = randomIntBetween(0, mutation.shapes.size() - 1); - ShapeBuilder shapeToChange = mutation.shapes.get(shapePosition); + ShapeBuilder shapeToChange = mutation.shapes.get(shapePosition); switch (shapeToChange.type()) { case POINT: shapeToChange = PointBuilderTests.mutate((PointBuilder) shapeToChange); 
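The dateline cases in the builder tests above (assertMultiPolygon for shapes crossing +/-180, assertPolygon otherwise) all come down to one rule: a shape that crosses the dateline is decomposed so that every part stays within valid longitudes. Below is a self-contained sketch of that rule for a bare longitude interval; splitAtDateline is an invented helper for illustration, not the builders' actual decomposition algorithm.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Sketch: why a dateline-crossing shape becomes two parts (hence MultiPolygon).
    public class DatelineSplitSketch {
        // Treats minLon > maxLon as "wraps across +/-180" and splits at the dateline.
        static List<double[]> splitAtDateline(double minLon, double maxLon) {
            List<double[]> parts = new ArrayList<>();
            if (minLon > maxLon) {
                parts.add(new double[] {minLon, 180d});   // eastern part, up to +180
                parts.add(new double[] {-180d, maxLon});  // western part, from -180
            } else {
                parts.add(new double[] {minLon, maxLon}); // no crossing: single part
            }
            return parts;
        }

        public static void main(String[] args) {
            // 170..-170 crosses the dateline -> two parts, like the MultiPolygon asserts.
            for (double[] part : splitAtDateline(170d, -170d)) {
                System.out.println(Arrays.toString(part)); // [170.0, 180.0] then [-180.0, -170.0]
            }
            // 100..101 does not cross -> one part, like the plain Polygon asserts.
            System.out.println(splitAtDateline(100d, 101d).size()); // 1
        }
    }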
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index 2fb4cfeb81dbf..ea3125accd059 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -187,7 +187,7 @@ public void parse(ParseContext context) throws IOException { // Let's add a Dummy Shape PointBuilder pb = new PointBuilder(-100, 45); if (shapeMapper instanceof GeoShapeFieldMapper) { - shapeMapper.parse(context.createExternalValueContext(pb.buildLucene())); + shapeMapper.parse(context.createExternalValueContext(pb.buildGeometry())); } else { shapeMapper.parse(context.createExternalValueContext(pb.buildS4J())); } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java index 83543c24f9bc1..92f017a7bdb7d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java @@ -63,7 +63,7 @@ protected void doAssertLuceneQuery(GeoPolygonQueryBuilder queryBuilder, Query qu } private static List randomPolygon() { - ShapeBuilder shapeBuilder = null; + ShapeBuilder shapeBuilder = null; // This is a temporary fix because sometimes the RandomShapeGenerator // returns null. This is if there is an error generating the polygon. So // in this case keep trying until we successfully generate one diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 22f9705dcc5f9..4851387b1a497 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -63,7 +63,7 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase indexedShapeToReturn; + protected static ShapeBuilder indexedShapeToReturn; protected String fieldName() { return GEO_SHAPE_FIELD_NAME; @@ -88,7 +88,7 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { // LatLonShape does not support MultiPoint queries RandomShapeGenerator.ShapeType shapeType = randomFrom(ShapeType.POINT, ShapeType.LINESTRING, ShapeType.MULTILINESTRING, ShapeType.POLYGON); - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); GeoShapeQueryBuilder builder; clearShapeFields(); if (indexedShape == false) { @@ -174,7 +174,7 @@ protected void doAssertLuceneQuery(GeoShapeQueryBuilder queryBuilder, Query quer } public void testNoFieldName() throws Exception { - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GeoShapeQueryBuilder(null, shape)); assertEquals("fieldName is required", e.getMessage()); } @@ -190,7 +190,7 @@ public void testNoIndexedShape() throws IOException { } public void testNoRelation() throws IOException { - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); 
GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(fieldName(), shape); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder.relation(null)); assertEquals("No Shape Relation defined", e.getMessage()); @@ -255,7 +255,7 @@ public void testMultipleRewrite() throws IOException { public void testIgnoreUnmapped() throws IOException { ShapeType shapeType = ShapeType.randomType(random()); - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); final GeoShapeQueryBuilder queryBuilder = new GeoShapeQueryBuilder("unmapped", shape); queryBuilder.ignoreUnmapped(true); Query query = queryBuilder.toQuery(createShardContext()); @@ -270,7 +270,7 @@ public void testIgnoreUnmapped() throws IOException { public void testWrongFieldType() throws IOException { ShapeType shapeType = ShapeType.randomType(random()); - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); final GeoShapeQueryBuilder queryBuilder = new GeoShapeQueryBuilder(STRING_FIELD_NAME, shape); QueryShardException e = expectThrows(QueryShardException.class, () -> queryBuilder.toQuery(createShardContext())); assertThat(e.getMessage(), containsString("Field [mapped_string] is not of type [geo_shape] but of type [text]")); diff --git a/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java b/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java index 2dcf3245dfe15..dc2a4a0e3fffe 100644 --- a/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java @@ -51,7 +51,7 @@ protected Settings createTestIndexSettings() { @Override protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { ShapeType shapeType = ShapeType.randomType(random()); - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); GeoShapeQueryBuilder builder; clearShapeFields(); if (indexedShape == false) { @@ -93,7 +93,7 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { } public void testInvalidRelation() throws IOException { - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); + ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); builder.strategy(SpatialStrategy.TERM); expectThrows(IllegalArgumentException.class, () -> builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); diff --git a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java index daa04442535eb..a5b64597181cf 100644 --- a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java +++ b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; +import 
org.elasticsearch.geo.geometry.MultiLine; import org.hamcrest.Matcher; import org.junit.Assert; import org.locationtech.jts.geom.Coordinate; @@ -221,7 +222,9 @@ public static void assertEquals(Object s1, Object s2) { || (s1 instanceof GeoPoint && s2 instanceof GeoPoint)) { Assert.assertEquals(s1, s2); } else if (s1 instanceof Object[] && s2 instanceof Object[]) { - Assert.assertArrayEquals((Object[])s1, (Object[])s2); + Assert.assertArrayEquals((Object[]) s1, (Object[]) s2); + } else if (s1 instanceof org.elasticsearch.geo.geometry.Geometry && s2 instanceof org.elasticsearch.geo.geometry.Geometry) { + Assert.assertEquals(s1, s2); } else { //We want to know the type of the shape because we test shape equality in a special way... //... in particular we test that one ring is equivalent to another ring even if the points are rotated or reversed. @@ -242,7 +245,7 @@ public static void assertMultiPolygon(Object shape, boolean useJTS) { unwrapJTS(shape) instanceof MultiPolygon); } else { assertTrue("expected Polygon[] but found " + shape.getClass().getName(), - shape instanceof org.apache.lucene.geo.Polygon[]); + shape instanceof org.elasticsearch.geo.geometry.MultiPolygon); } } @@ -252,7 +255,7 @@ public static void assertPolygon(Object shape, boolean useJTS) { + unwrapJTS(shape).getClass().getName(), unwrapJTS(shape) instanceof Polygon); } else { assertTrue("expected Polygon but found " + shape.getClass().getName(), - shape instanceof org.apache.lucene.geo.Polygon); + shape instanceof org.elasticsearch.geo.geometry.Polygon); } } @@ -262,7 +265,7 @@ public static void assertLineString(Object shape, boolean useJTS) { + unwrapJTS(shape).getClass().getName(), unwrapJTS(shape) instanceof LineString); } else { assertTrue("expected Line but found " + shape.getClass().getName(), - shape instanceof org.apache.lucene.geo.Line); + shape instanceof org.elasticsearch.geo.geometry.Line); } } @@ -272,7 +275,7 @@ public static void assertMultiLineString(Object shape, boolean useJTS) { + unwrapJTS(shape).getClass().getName(), unwrapJTS(shape) instanceof MultiLineString); } else { assertTrue("expected Line[] but found " + shape.getClass().getName(), - shape instanceof org.apache.lucene.geo.Line[]); + shape instanceof MultiLine); } } From afd4618851813d15584f1aac656fab4d694fbeb6 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Fri, 25 Jan 2019 12:33:42 -0500 Subject: [PATCH 55/64] Fixes for a few randomized agg tests that fail hasValue() checks Closes #37743 Closes #37873 --- .../bucket/filter/FilterAggregatorTests.java | 40 +++++----- .../bucket/filter/FiltersAggregatorTests.java | 78 +++++++++---------- .../bucket/nested/NestedAggregatorTests.java | 12 ++- 3 files changed, 70 insertions(+), 60 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java index af5f65b9698e4..56153d2d35f13 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java @@ -63,7 +63,6 @@ public void testEmpty() throws Exception { directory.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37743") public void testRandom() throws Exception { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); @@ -86,26 +85,31 @@ public void 
testRandom() throws Exception { IndexReader indexReader = DirectoryReader.open(directory); IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - int value = randomInt(maxTerm - 1); - QueryBuilder filter = QueryBuilders.termQuery("field", Integer.toString(value)); - FilterAggregationBuilder builder = new FilterAggregationBuilder("test", filter); + try { + + int value = randomInt(maxTerm - 1); + QueryBuilder filter = QueryBuilders.termQuery("field", Integer.toString(value)); + FilterAggregationBuilder builder = new FilterAggregationBuilder("test", filter); - for (boolean doReduce : new boolean[] {true, false}) { - final InternalFilter response; - if (doReduce) { - response = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, + for (boolean doReduce : new boolean[]{true, false}) { + final InternalFilter response; + if (doReduce) { + response = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); - } else { - response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); - } - assertEquals(response.getDocCount(), (long) expectedBucketCount[value]); - if (expectedBucketCount[expectedBucketCount[value]] > 0) { - assertTrue(AggregationInspectionHelper.hasValue(response)); - } else { - assertFalse(AggregationInspectionHelper.hasValue(response)); + } else { + response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); + } + assertEquals(response.getDocCount(), (long) expectedBucketCount[value]); + if (expectedBucketCount[value] > 0) { + assertTrue(AggregationInspectionHelper.hasValue(response)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(response)); + } } + } finally { + indexReader.close(); + directory.close(); } - indexReader.close(); - directory.close(); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index ff5cb84482db0..90a1a11dde26f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -160,52 +160,50 @@ public void testRandom() throws Exception { IndexReader indexReader = DirectoryReader.open(directory); IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - int numFilters = randomIntBetween(1, 10); - QueryBuilder[] filters = new QueryBuilder[numFilters]; - int[] filterTerms = new int[numFilters]; - int expectedOtherCount = numDocs; - Set filterSet = new HashSet<>(); - for (int i = 0; i < filters.length; i++) { - int value = randomInt(maxTerm-1); - filters[i] = QueryBuilders.termQuery("field", Integer.toString(value)); - filterTerms[i] = value; - if (filterSet.contains(value) == false) { - expectedOtherCount -= expectedBucketCount[value]; - filterSet.add(value); - } - } - FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", filters); - builder.otherBucket(true); - builder.otherBucketKey("other"); - - for (boolean doReduce : new boolean[] {true, false}) { - final InternalFilters response; - if (doReduce) { - response = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); - } else { - response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); + try { + int numFilters = randomIntBetween(1, 10); + QueryBuilder[] filters = new QueryBuilder[numFilters]; + int[] filterTerms = 
new int[numFilters]; + int expectedOtherCount = numDocs; + Set filterSet = new HashSet<>(); + for (int i = 0; i < filters.length; i++) { + int value = randomInt(maxTerm - 1); + filters[i] = QueryBuilders.termQuery("field", Integer.toString(value)); + filterTerms[i] = value; + if (filterSet.contains(value) == false) { + expectedOtherCount -= expectedBucketCount[value]; + filterSet.add(value); + } } - List buckets = response.getBuckets(); - assertEquals(buckets.size(), filters.length+1); + FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", filters); + builder.otherBucket(true); + builder.otherBucketKey("other"); - int sum = 0; - for (InternalFilters.InternalBucket bucket : buckets) { - if ("other".equals(bucket.getKey())) { - assertEquals(bucket.getDocCount(), expectedOtherCount); + for (boolean doReduce : new boolean[]{true, false}) { + final InternalFilters response; + if (doReduce) { + response = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); } else { - int index = Integer.parseInt(bucket.getKey()); - assertEquals(bucket.getDocCount(), (long) expectedBucketCount[filterTerms[index]]); - sum += expectedBucketCount[filterTerms[index]]; + response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); } - } - if (sum > 0) { + List buckets = response.getBuckets(); + assertEquals(buckets.size(), filters.length + 1); + + for (InternalFilters.InternalBucket bucket : buckets) { + if ("other".equals(bucket.getKey())) { + assertEquals(bucket.getDocCount(), expectedOtherCount); + } else { + int index = Integer.parseInt(bucket.getKey()); + assertEquals(bucket.getDocCount(), (long) expectedBucketCount[filterTerms[index]]); + } + } + + // Always true because we include 'other' in the agg assertTrue(AggregationInspectionHelper.hasValue(response)); - } else { - assertFalse(AggregationInspectionHelper.hasValue(response)); } - + } finally { + indexReader.close(); + directory.close(); } - indexReader.close(); - directory.close(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 1eef8de86b304..96d66c9e0c269 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -174,7 +174,11 @@ public void testSingleNestingMax() throws IOException { assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); - assertTrue(AggregationInspectionHelper.hasValue(nested)); + if (expectedNestedDocs > 0) { + assertTrue(AggregationInspectionHelper.hasValue(nested)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(nested)); + } } } } @@ -224,7 +228,11 @@ public void testDoubleNestingMax() throws IOException { assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); - assertTrue(AggregationInspectionHelper.hasValue(nested)); + if (expectedNestedDocs > 0) { + assertTrue(AggregationInspectionHelper.hasValue(nested)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(nested)); + } } } } From f1e71be8b24f65bc9fb340f269436acab3642366 Mon Sep 17 00:00:00 2001 From: Yuri Astrakhan Date: Fri, 25 Jan 2019 13:37:24 -0500 Subject: [PATCH 56/64] Refactored GeoHashGrid unit tests (#37832) * Refactored GeoHashGrid unit 
tests This change allows other grid aggregations to reuse the same tests. The change mostly just moves code to the base classes, trying to keep changes to a bare minimum. * rename createInternalGeoHashGridBucket to createInternalGeoGridBucket * indentation --- .../geogrid/GeoGridAggregatorTestCase.java | 141 +++++++++++++++ .../bucket/geogrid/GeoGridTestCase.java | 167 ++++++++++++++++++ .../geogrid/GeoHashGridAggregatorTests.java | 111 ++---------- .../bucket/geogrid/GeoHashGridTests.java | 112 +----------- 4 files changed, 328 insertions(+), 203 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java new file mode 100644 index 0000000000000..5965574bef6e8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; + +public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { + + private static final String FIELD_NAME = "location"; + + /** + * Generate a random precision according to the rules of the given aggregation. 
+ */ + protected abstract int randomPrecision(); + + /** + * Convert geo point into a hash string (bucket string ID) + */ + protected abstract String hashAsString(double lng, double lat, int precision); + + /** + * Create a new named {@link GeoGridAggregationBuilder}-derived builder + */ + protected abstract GeoGridAggregationBuilder createBuilder(String name); + + public void testNoDocs() throws IOException { + testCase(new MatchAllDocsQuery(), FIELD_NAME, randomPrecision(), iw -> { + // Intentionally not writing any docs + }, geoGrid -> { + assertEquals(0, geoGrid.getBuckets().size()); + }); + } + + public void testFieldMissing() throws IOException { + testCase(new MatchAllDocsQuery(), "wrong_field", randomPrecision(), iw -> { + iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D))); + }, geoGrid -> { + assertEquals(0, geoGrid.getBuckets().size()); + }); + } + + public void testWithSeveralDocs() throws IOException { + int precision = randomPrecision(); + int numPoints = randomIntBetween(8, 128); + Map expectedCountPerGeoHash = new HashMap<>(); + testCase(new MatchAllDocsQuery(), FIELD_NAME, precision, iw -> { + List points = new ArrayList<>(); + Set distinctHashesPerDoc = new HashSet<>(); + for (int pointId = 0; pointId < numPoints; pointId++) { + double lat = (180d * randomDouble()) - 90d; + double lng = (360d * randomDouble()) - 180d; + + points.add(new LatLonDocValuesField(FIELD_NAME, lat, lng)); + String hash = hashAsString(lng, lat, precision); + if (distinctHashesPerDoc.contains(hash) == false) { + expectedCountPerGeoHash.put(hash, expectedCountPerGeoHash.getOrDefault(hash, 0) + 1); + } + distinctHashesPerDoc.add(hash); + if (usually()) { + iw.addDocument(points); + points.clear(); + distinctHashesPerDoc.clear(); + } + } + if (points.size() != 0) { + iw.addDocument(points); + } + }, geoHashGrid -> { + assertEquals(expectedCountPerGeoHash.size(), geoHashGrid.getBuckets().size()); + for (GeoGrid.Bucket bucket : geoHashGrid.getBuckets()) { + assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount()); + } + assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid)); + }); + } + + private void testCase(Query query, String field, int precision, CheckedConsumer buildIndex, + Consumer> verify) throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + buildIndex.accept(indexWriter); + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + GeoGridAggregationBuilder aggregationBuilder = createBuilder("_name").field(field); + aggregationBuilder.precision(precision); + MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(FIELD_NAME); + + Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(query, aggregator); + aggregator.postCollection(); + verify.accept((InternalGeoGrid) aggregator.buildAggregation(0L)); + + indexReader.close(); + directory.close(); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java new file mode 100644 index 0000000000000..1f3803cca859b --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.geogrid; + +import org.apache.lucene.index.IndexWriter; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; +import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public abstract class GeoGridTestCase> + extends InternalMultiBucketAggregationTestCase { + + /** + * Instantiate a {@link InternalGeoGrid}-derived class using the same parameters as constructor. + */ + protected abstract T createInternalGeoGrid(String name, int size, List buckets, + List pipelineAggregators, Map metaData); + + /** + * Instantiate a {@link InternalGeoGridBucket}-derived class using the same parameters as constructor. + */ + protected abstract B createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations); + + /** + * Encode longitude and latitude with a given precision as a long hash. + */ + protected abstract long longEncode(double lng, double lat, int precision); + + /** + * Generate a random precision according to the rules of the given aggregation. 
+     */
+    protected abstract int randomPrecision();
+
+    @Override
+    protected int minNumberOfBuckets() {
+        return 1;
+    }
+
+    @Override
+    protected int maxNumberOfBuckets() {
+        return 3;
+    }
+
+    @Override
+    protected T createTestInstance(String name,
+                                   List<PipelineAggregator> pipelineAggregators,
+                                   Map<String, Object> metaData,
+                                   InternalAggregations aggregations) {
+        final int precision = randomPrecision();
+        int size = randomNumberOfBuckets();
+        List<InternalGeoGridBucket> buckets = new ArrayList<>(size);
+        for (int i = 0; i < size; i++) {
+            double latitude = randomDoubleBetween(-90.0, 90.0, false);
+            double longitude = randomDoubleBetween(-180.0, 180.0, false);
+
+            long hashAsLong = longEncode(longitude, latitude, precision);
+            buckets.add(createInternalGeoGridBucket(hashAsLong, randomInt(IndexWriter.MAX_DOCS), aggregations));
+        }
+        return createInternalGeoGrid(name, size, buckets, pipelineAggregators, metaData);
+    }
+
+    @Override
+    protected void assertReduced(T reduced, List<T> inputs) {
+        Map<Long, List<B>> map = new HashMap<>();
+        for (T input : inputs) {
+            for (GeoGrid.Bucket bucketBase : input.getBuckets()) {
+                B bucket = (B) bucketBase;
+                List<B> buckets = map.get(bucket.hashAsLong);
+                if (buckets == null) {
+                    map.put(bucket.hashAsLong, buckets = new ArrayList<>());
+                }
+                buckets.add(bucket);
+            }
+        }
+        List<B> expectedBuckets = new ArrayList<>();
+        for (Map.Entry<Long, List<B>> entry : map.entrySet()) {
+            long docCount = 0;
+            for (B bucket : entry.getValue()) {
+                docCount += bucket.docCount;
+            }
+            expectedBuckets.add(createInternalGeoGridBucket(entry.getKey(), docCount, InternalAggregations.EMPTY));
+        }
+        expectedBuckets.sort((first, second) -> {
+            int cmp = Long.compare(second.docCount, first.docCount);
+            if (cmp == 0) {
+                return second.compareTo(first);
+            }
+            return cmp;
+        });
+        int requestedSize = inputs.get(0).getRequiredSize();
+        expectedBuckets = expectedBuckets.subList(0, Math.min(requestedSize, expectedBuckets.size()));
+        assertEquals(expectedBuckets.size(), reduced.getBuckets().size());
+        for (int i = 0; i < reduced.getBuckets().size(); i++) {
+            GeoGrid.Bucket expected = expectedBuckets.get(i);
+            GeoGrid.Bucket actual = reduced.getBuckets().get(i);
+            assertEquals(expected.getDocCount(), actual.getDocCount());
+            assertEquals(expected.getKey(), actual.getKey());
+        }
+    }
+
+    @Override
+    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
+        return ParsedGeoGrid.class;
+    }
+
+    @Override
+    protected T mutateInstance(T instance) {
+        String name = instance.getName();
+        int size = instance.getRequiredSize();
+        List<InternalGeoGridBucket> buckets = instance.getBuckets();
+        List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
+        Map<String, Object> metaData = instance.getMetaData();
+        switch (between(0, 3)) {
+        case 0:
+            name += randomAlphaOfLength(5);
+            break;
+        case 1:
+            buckets = new ArrayList<>(buckets);
+            buckets.add(
+                createInternalGeoGridBucket(randomNonNegativeLong(), randomInt(IndexWriter.MAX_DOCS), InternalAggregations.EMPTY));
+            break;
+        case 2:
+            size = size + between(1, 10);
+            break;
+        case 3:
+            if (metaData == null) {
+                metaData = new HashMap<>(1);
+            } else {
+                metaData = new HashMap<>(instance.getMetaData());
+            }
+            metaData.put(randomAlphaOfLength(15), randomInt());
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return createInternalGeoGrid(name, size, buckets, pipelineAggregators, metaData);
+    }
+
+    public void testCreateFromBuckets() {
+        InternalGeoGrid original = createTestInstance();
+        assertThat(original, equalTo(original.create(original.buckets)));
+    }
+}
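The assertReduced expectations above pin down the reduce semantics: merge buckets from all inputs by hashAsLong, sum their doc counts, order by doc count descending, and truncate to the requested size. A standalone sketch of that pipeline follows; the Cell class is invented for the sketch and is not a type from the patch, and where the test delegates ties to the bucket's own comparator, the sketch substitutes a simple descending-id tie-break.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GridReduceSketch {

    /** Invented stand-in for a grid bucket; not a type from the patch. */
    static final class Cell {
        final long hashAsLong;
        final long docCount;

        Cell(long hashAsLong, long docCount) {
            this.hashAsLong = hashAsLong;
            this.docCount = docCount;
        }

        @Override
        public String toString() {
            return hashAsLong + "=" + docCount;
        }
    }

    static List<Cell> reduce(List<List<Cell>> shardResults, int requiredSize) {
        // 1. Merge buckets from every input by cell id, summing doc counts.
        Map<Long, Long> merged = new HashMap<>();
        for (List<Cell> shard : shardResults) {
            for (Cell cell : shard) {
                merged.merge(cell.hashAsLong, cell.docCount, Long::sum);
            }
        }
        // 2. Sort by doc count descending; ties broken by descending cell id here.
        List<Cell> cells = new ArrayList<>();
        merged.forEach((hash, count) -> cells.add(new Cell(hash, count)));
        cells.sort(Comparator.comparingLong((Cell c) -> c.docCount).reversed()
            .thenComparing(Comparator.comparingLong((Cell c) -> c.hashAsLong).reversed()));
        // 3. Truncate to the requested number of buckets.
        return new ArrayList<>(cells.subList(0, Math.min(requiredSize, cells.size())));
    }

    public static void main(String[] args) {
        List<List<Cell>> inputs = Arrays.asList(
            Arrays.asList(new Cell(1, 5), new Cell(2, 3)),
            Arrays.asList(new Cell(2, 4), new Cell(3, 7)));
        // Cell 2 merges to 3 + 4 = 7 docs, tying cell 3; requiredSize = 2 drops cell 1.
        System.out.println(reduce(inputs, 2)); // prints [3=7, 2=7]
    }
}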
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java
index ce4a065ef4c77..d01a1d3b6d098 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java
@@ -16,114 +16,25 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.search.aggregations.bucket.geogrid;
-
-import org.apache.lucene.document.LatLonDocValuesField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.store.Directory;
-import org.elasticsearch.common.CheckedConsumer;
-import org.elasticsearch.index.mapper.GeoPointFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.search.aggregations.Aggregator;
-import org.elasticsearch.search.aggregations.AggregatorTestCase;
-import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Consumer;
+package org.elasticsearch.search.aggregations.bucket.geogrid;
 
 import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
 
-public class GeoHashGridAggregatorTests extends AggregatorTestCase {
-
-    private static final String FIELD_NAME = "location";
-
-    public void testNoDocs() throws IOException {
-        testCase(new MatchAllDocsQuery(), FIELD_NAME, 1, iw -> {
-            // Intentionally not writing any docs
-        }, geoHashGrid -> {
-            assertEquals(0, geoHashGrid.getBuckets().size());
-            assertFalse(AggregationInspectionHelper.hasValue(geoHashGrid));
-        });
-    }
+public class GeoHashGridAggregatorTests extends GeoGridAggregatorTestCase<InternalGeoHashGridBucket> {
 
-    public void testFieldMissing() throws IOException {
-        testCase(new MatchAllDocsQuery(), "wrong_field", 1, iw -> {
-            iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D)));
-        }, geoHashGrid -> {
-            assertEquals(0, geoHashGrid.getBuckets().size());
-            assertFalse(AggregationInspectionHelper.hasValue(geoHashGrid));
-        });
+    @Override
+    protected int randomPrecision() {
+        return randomIntBetween(1, 12);
     }
 
-    public void testWithSeveralDocs() throws IOException {
-        int precision = randomIntBetween(1, 12);
-        int numPoints = randomIntBetween(8, 128);
-        Map<String, Integer> expectedCountPerGeoHash = new HashMap<>();
-        testCase(new MatchAllDocsQuery(), FIELD_NAME, precision, iw -> {
-            List<LatLonDocValuesField> points = new ArrayList<>();
-            Set<String> distinctHashesPerDoc = new HashSet<>();
-            for (int pointId = 0; pointId < numPoints; pointId++) {
-                double lat = (180d * randomDouble()) - 90d;
-                double lng = (360d * randomDouble()) - 180d;
-                points.add(new LatLonDocValuesField(FIELD_NAME, lat, lng));
-                String hash = stringEncode(lng, lat, precision);
-                if (distinctHashesPerDoc.contains(hash) == false) {
-                    expectedCountPerGeoHash.put(hash, expectedCountPerGeoHash.getOrDefault(hash, 0) + 1);
-                }
-                distinctHashesPerDoc.add(hash);
-                if (usually()) {
-                    iw.addDocument(points);
-                    points.clear();
-                    distinctHashesPerDoc.clear();
-                }
-            }
-            if (points.size() != 0) {
-                iw.addDocument(points);
-            }
-        }, geoHashGrid -> {
-            assertEquals(expectedCountPerGeoHash.size(), geoHashGrid.getBuckets().size());
-            for (GeoGrid.Bucket bucket : geoHashGrid.getBuckets()) {
-                assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount());
-            }
-            assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid));
-        });
+    @Override
+    protected String hashAsString(double lng, double lat, int precision) {
+        return stringEncode(lng, lat, precision);
    }
 
-    private void testCase(Query query, String field, int precision, CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
-                          Consumer<InternalGeoHashGrid> verify) throws IOException {
-        Directory directory = newDirectory();
-        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
-        buildIndex.accept(indexWriter);
-        indexWriter.close();
-
-        IndexReader indexReader = DirectoryReader.open(directory);
-        IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
-
-        GeoGridAggregationBuilder aggregationBuilder = new GeoHashGridAggregationBuilder("_name").field(field);
-        aggregationBuilder.precision(precision);
-        MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
-        fieldType.setHasDocValues(true);
-        fieldType.setName(FIELD_NAME);
-
-        Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
-        aggregator.preCollection();
-        indexSearcher.search(query, aggregator);
-        aggregator.postCollection();
-        verify.accept((InternalGeoHashGrid) aggregator.buildAggregation(0L));
-
-        indexReader.close();
-        directory.close();
+    @Override
+    protected GeoGridAggregationBuilder createBuilder(String name) {
+        return new GeoHashGridAggregationBuilder(name);
     }
 }
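The hashAsString hook above delegates to GeoHashUtils.stringEncode. For readers who want the cell id scheme spelled out, here is a minimal standalone geohash encoder under the standard scheme (longitude and latitude bisected on alternating bits, five bits per base-32 character). This sketch is for illustration only and is not the GeoHashUtils implementation.

public final class GeohashSketch {
    private static final String BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz";

    public static String encode(double lng, double lat, int precision) {
        double minLat = -90, maxLat = 90, minLng = -180, maxLng = 180;
        StringBuilder hash = new StringBuilder(precision);
        boolean evenBit = true; // even-numbered bits refine longitude, odd bits latitude
        int bit = 0;
        int ch = 0;
        while (hash.length() < precision) {
            if (evenBit) {
                double mid = (minLng + maxLng) / 2;
                if (lng >= mid) {
                    ch = (ch << 1) | 1;
                    minLng = mid;
                } else {
                    ch = ch << 1;
                    maxLng = mid;
                }
            } else {
                double mid = (minLat + maxLat) / 2;
                if (lat >= mid) {
                    ch = (ch << 1) | 1;
                    minLat = mid;
                } else {
                    ch = ch << 1;
                    maxLat = mid;
                }
            }
            evenBit = evenBit == false;
            if (++bit == 5) { // five bits make one base-32 character
                hash.append(BASE32.charAt(ch));
                bit = 0;
                ch = 0;
            }
        }
        return hash.toString();
    }

    public static void main(String[] args) {
        System.out.println(encode(-5.6, 42.6, 5)); // canonical example, prints "ezs42"
    }
}

Each added character narrows the cell, which is why the tests treat precision as the number of characters in the bucket key.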
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java
index 02c8016556220..c48308e6e1724 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java
@@ -18,47 +18,19 @@
  */
 package org.elasticsearch.search.aggregations.bucket.geogrid;
 
-import org.apache.lucene.index.IndexWriter;
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
-import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
-import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import static org.hamcrest.Matchers.equalTo;
-
-public class GeoHashGridTests extends InternalMultiBucketAggregationTestCase<InternalGeoHashGrid> {
-
-    @Override
-    protected int minNumberOfBuckets() {
-        return 1;
-    }
+public class GeoHashGridTests extends GeoGridTestCase<InternalGeoHashGridBucket, InternalGeoHashGrid> {
 
     @Override
-    protected int maxNumberOfBuckets() {
-        return 3;
-    }
-
-    @Override
-    protected InternalGeoHashGrid createTestInstance(String name,
-                                                     List<PipelineAggregator> pipelineAggregators,
-                                                     Map<String, Object> metaData,
-                                                     InternalAggregations aggregations) {
-        int size = randomNumberOfBuckets();
-        List<InternalGeoGridBucket> buckets = new ArrayList<>(size);
-        for (int i = 0; i < size; i++) {
-            double latitude = randomDoubleBetween(-90.0, 90.0, false);
-            double longitude = randomDoubleBetween(-180.0, 180.0, false);
-
-            long geoHashAsLong = GeoHashUtils.longEncode(longitude, latitude, 4);
-            buckets.add(new InternalGeoHashGridBucket(geoHashAsLong, randomInt(IndexWriter.MAX_DOCS), aggregations));
-        }
+    protected InternalGeoHashGrid createInternalGeoGrid(String name, int size, List<InternalGeoGridBucket> buckets,
+                                                        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
         return new InternalGeoHashGrid(name, size, buckets, pipelineAggregators, metaData);
     }
 
@@ -68,83 +40,17 @@ protected Writeable.Reader<InternalGeoHashGrid> instanceReader() {
     }
 
     @Override
-    protected void assertReduced(InternalGeoHashGrid reduced, List<InternalGeoHashGrid> inputs) {
-        Map<Long, List<InternalGeoGridBucket>> map = new HashMap<>();
-        for (InternalGeoHashGrid input : inputs) {
-            for (InternalGeoGridBucket bucket : input.getBuckets()) {
-                List<InternalGeoGridBucket> buckets = map.get(bucket.hashAsLong);
-                if (buckets == null) {
-                    map.put(bucket.hashAsLong, buckets = new ArrayList<>());
-                }
-                buckets.add(bucket);
-            }
-        }
-        List<InternalGeoGridBucket> expectedBuckets = new ArrayList<>();
-        for (Map.Entry<Long, List<InternalGeoGridBucket>> entry : map.entrySet()) {
-            long docCount = 0;
-            for (InternalGeoGridBucket bucket : entry.getValue()) {
-                docCount += bucket.docCount;
-            }
-            expectedBuckets.add(new InternalGeoHashGridBucket(entry.getKey(), docCount, InternalAggregations.EMPTY));
-        }
-        expectedBuckets.sort((first, second) -> {
-            int cmp = Long.compare(second.docCount, first.docCount);
-            if (cmp == 0) {
-                return second.compareTo(first);
-            }
-            return cmp;
-        });
-        int requestedSize = inputs.get(0).getRequiredSize();
-        expectedBuckets = expectedBuckets.subList(0, Math.min(requestedSize, expectedBuckets.size()));
-        assertEquals(expectedBuckets.size(), reduced.getBuckets().size());
-        for (int i = 0; i < reduced.getBuckets().size(); i++) {
-            GeoGrid.Bucket expected = expectedBuckets.get(i);
-            GeoGrid.Bucket actual = reduced.getBuckets().get(i);
-            assertEquals(expected.getDocCount(), actual.getDocCount());
-            assertEquals(expected.getKey(), actual.getKey());
-        }
+    protected InternalGeoHashGridBucket createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations) {
+        return new InternalGeoHashGridBucket(key, docCount, aggregations);
     }
 
     @Override
-    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
-        return ParsedGeoHashGrid.class;
+    protected long longEncode(double lng, double lat, int precision) {
+        return GeoHashUtils.longEncode(lng, lat, precision);
     }
 
     @Override
-    protected InternalGeoHashGrid mutateInstance(InternalGeoHashGrid instance) {
-        String name = instance.getName();
-        int size = instance.getRequiredSize();
-        List<InternalGeoGridBucket> buckets = instance.getBuckets();
-        List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
-        Map<String, Object> metaData = instance.getMetaData();
-        switch (between(0, 3)) {
-        case 0:
-            name += randomAlphaOfLength(5);
-            break;
-        case 1:
-            buckets = new ArrayList<>(buckets);
-            buckets.add(
-                new InternalGeoHashGridBucket(randomNonNegativeLong(), randomInt(IndexWriter.MAX_DOCS), InternalAggregations.EMPTY));
-            break;
-        case 2:
-            size = size + between(1, 10);
-            break;
-        case 3:
-            if (metaData == null) {
-                metaData = new HashMap<>(1);
-            } else {
-                metaData = new HashMap<>(instance.getMetaData());
-            }
-            metaData.put(randomAlphaOfLength(15), randomInt());
-            break;
-        default:
-            throw new AssertionError("Illegal randomisation branch");
-        }
-        return new InternalGeoHashGrid(name, size, buckets, pipelineAggregators, metaData);
-    }
-
-    public void testCreateFromBuckets() {
-        InternalGeoHashGrid original = createTestInstance();
-        assertThat(original, equalTo(original.create(original.buckets)));
+    protected int randomPrecision() {
+        return randomIntBetween(1, 12);
     }
 }
From 455f223c3a8526d7462159ab362754c890d2af6f Mon Sep 17 00:00:00 2001
From: Julie Tibshirani
Date: Fri, 25 Jan 2019 11:11:46 -0800
Subject: [PATCH 57/64] Mute TransformIntegrationTests#testSearchTransform

Tracked in #37882.
---
 .../xpack/watcher/transform/TransformIntegrationTests.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java
index 0a12a0b1a643d..042e82765354f 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java
@@ -156,6 +156,7 @@ public void testScriptTransform() throws Exception {
         assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37882")
     public void testSearchTransform() throws Exception {
         createIndex("my-condition-index", "my-payload-index");
         ensureGreen("my-condition-index", "my-payload-index");

From 5cd4dfb0e42b01fff2ce6e410339f4edca11be42 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 25 Jan 2019 14:54:13 -0500
Subject: [PATCH 58/64] Relax cluster metadata version check (#37834)

If the in_sync_allocations of index-1 or index-2 is changed, the
metadata version will be increased. This leads to failures in the
metadata version checks, so we need to relax them.

Closes #37820
---
 .../elasticsearch/cluster/SimpleClusterStateIT.java | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
index 45c4d5d3927f7..a22b3c42b931e 100644
--- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
@@ -119,20 +119,19 @@ public void testMetadata() throws Exception {
         assertThat(clusterStateResponse.getState().metaData().indices().size(), is(0));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37820")
     public void testMetadataVersion() {
         createIndex("index-1");
         createIndex("index-2");
-        long metadataVersion = client().admin().cluster().prepareState().get().getState().metaData().version();
-        assertThat(metadataVersion, greaterThan(0L));
+        long baselineVersion = client().admin().cluster().prepareState().get().getState().metaData().version();
+        assertThat(baselineVersion, greaterThan(0L));
         assertThat(client().admin().cluster().prepareState().setIndices("index-1").get().getState().metaData().version(),
-            equalTo(metadataVersion));
+            greaterThanOrEqualTo(baselineVersion));
         assertThat(client().admin().cluster().prepareState().setIndices("index-2").get().getState().metaData().version(),
-            equalTo(metadataVersion));
+            greaterThanOrEqualTo(baselineVersion));
         assertThat(client().admin().cluster().prepareState().setIndices("*").get().getState().metaData().version(),
-            equalTo(metadataVersion));
+            greaterThanOrEqualTo(baselineVersion));
         assertThat(client().admin().cluster().prepareState().setIndices("not-found").get().getState().metaData().version(),
-            equalTo(metadataVersion));
+            greaterThanOrEqualTo(baselineVersion));
assertThat(client().admin().cluster().prepareState().clear().setMetaData(false).get().getState().metaData().version(), equalTo(0L)); } From 9e932f48699b4b331cfd34482d7058f46a683106 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 25 Jan 2019 13:57:41 -0600 Subject: [PATCH 59/64] ML: removing unnecessary upgrade code (#37879) --- .../xpack/core/XPackClientPlugin.java | 2 - .../xpack/core/ml/action/MlUpgradeAction.java | 160 ------ .../core/ml/action/MlUpgradeRequestTests.java | 28 - .../ml/qa/ml-with-security/build.gradle | 4 +- .../xpack/ml/integration/MlUpgradeIT.java | 378 ------------- .../xpack/ml/MachineLearning.java | 9 +- .../xpack/ml/ResultsIndexUpgradeService.java | 513 ------------------ .../ml/action/TransportMlUpgradeAction.java | 79 --- .../ml/rest/results/RestUpgradeMlAction.java | 76 --- .../rest-api-spec/api/ml.upgrade.json | 21 - .../rest-api-spec/test/ml/ml_upgrade.yml | 70 --- .../mixed_cluster/80_ml_results_upgrade.yml | 11 - .../old_cluster/80_ml_results_upgrade.yml | 120 ---- .../80_ml_results_upgrade.yml | 158 ------ 14 files changed, 3 insertions(+), 1626 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeAction.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeRequestTests.java delete mode 100644 x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlUpgradeIT.java delete mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/ResultsIndexUpgradeService.java delete mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlUpgradeAction.java delete mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestUpgradeMlAction.java delete mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/ml.upgrade.json delete mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_upgrade.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_ml_results_upgrade.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_ml_results_upgrade.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_ml_results_upgrade.yml diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a121217d4cdaa..25b745c4f499a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -107,7 +107,6 @@ import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; -import org.elasticsearch.xpack.core.ml.action.MlUpgradeAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; @@ -292,7 +291,6 @@ public List> getClientActions() { PostCalendarEventsAction.INSTANCE, PersistJobAction.INSTANCE, FindFileStructureAction.INSTANCE, - MlUpgradeAction.INSTANCE, // security ClearRealmCacheAction.INSTANCE, ClearRolesCacheAction.INSTANCE, diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeAction.java deleted file mode 100644 index 404f15d4f6270..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeAction.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.ml.action; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.tasks.CancellableTask; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; - -import java.io.IOException; -import java.util.Map; -import java.util.Objects; - - -public class MlUpgradeAction extends Action { - public static final MlUpgradeAction INSTANCE = new MlUpgradeAction(); - public static final String NAME = "cluster:admin/xpack/ml/upgrade"; - - private MlUpgradeAction() { - super(NAME); - } - - @Override - public AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); - } - - public static class Request extends MasterNodeReadRequest implements ToXContentObject { - - private static final ParseField REINDEX_BATCH_SIZE = new ParseField("reindex_batch_size"); - - public static ObjectParser PARSER = new ObjectParser<>("ml_upgrade", true, Request::new); - static { - PARSER.declareInt(Request::setReindexBatchSize, REINDEX_BATCH_SIZE); - } - - static final String INDEX = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "*"; - private int reindexBatchSize = 1000; - - /** - * Should this task store its result? - */ - private boolean shouldStoreResult; - - // for serialization - public Request() { - } - - public Request(StreamInput in) throws IOException { - super(in); - reindexBatchSize = in.readInt(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeInt(reindexBatchSize); - } - - public String[] indices() { - return new String[]{INDEX}; - } - - public IndicesOptions indicesOptions() { - return IndicesOptions.strictExpandOpenAndForbidClosed(); - } - - /** - * Should this task store its result after it has finished? 
- */ - public Request setShouldStoreResult(boolean shouldStoreResult) { - this.shouldStoreResult = shouldStoreResult; - return this; - } - - @Override - public boolean getShouldStoreResult() { - return shouldStoreResult; - } - - public Request setReindexBatchSize(int reindexBatchSize) { - this.reindexBatchSize = reindexBatchSize; - return this; - } - - public int getReindexBatchSize() { - return reindexBatchSize; - } - - @Override - public ActionRequestValidationException validate() { - if (reindexBatchSize <= 0) { - ActionRequestValidationException validationException = new ActionRequestValidationException(); - validationException.addValidationError("["+ REINDEX_BATCH_SIZE.getPreferredName()+"] must be greater than 0."); - return validationException; - } - return null; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Request request = (Request) o; - return Objects.equals(reindexBatchSize, request.reindexBatchSize); - } - - @Override - public int hashCode() { - return Objects.hash(reindexBatchSize); - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, "ml-upgrade", parentTaskId, headers) { - @Override - public boolean shouldCancelChildrenOnCancellation() { - return true; - } - }; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(REINDEX_BATCH_SIZE.getPreferredName(), reindexBatchSize); - builder.endObject(); - return builder; - } - } - - public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder { - - public RequestBuilder(ElasticsearchClient client) { - super(client, INSTANCE, new Request()); - } - } - -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeRequestTests.java deleted file mode 100644 index 227fc20ec9688..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlUpgradeRequestTests.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.ml.action; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; - - -public class MlUpgradeRequestTests extends AbstractWireSerializingTestCase { - - @Override - protected MlUpgradeAction.Request createTestInstance() { - MlUpgradeAction.Request request = new MlUpgradeAction.Request(); - if (randomBoolean()) { - request.setReindexBatchSize(randomIntBetween(1, 10_000)); - } - return request; - } - - @Override - protected Writeable.Reader instanceReader() { - return MlUpgradeAction.Request::new; - } - -} diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index 6e0127f614c9a..abfed3fd878d0 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -93,9 +93,7 @@ integTestRunner { 'ml/validate/Test job config that is invalid only because of the job ID', 'ml/validate_detector/Test invalid detector', 'ml/delete_forecast/Test delete on _all forecasts not allow no forecasts', - 'ml/delete_forecast/Test delete forecast on missing forecast', - 'ml/ml_upgrade/Upgrade results when there is nothing to upgrade', - 'ml/ml_upgrade/Upgrade results when there is nothing to upgrade not waiting for results' + 'ml/delete_forecast/Test delete forecast on missing forecast' ].join(',') } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlUpgradeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlUpgradeIT.java deleted file mode 100644 index a2a05ea1686fa..0000000000000 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlUpgradeIT.java +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.integration; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.reindex.ReindexAction; -import org.elasticsearch.index.reindex.ReindexRequest; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.action.MlUpgradeAction; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; -import org.elasticsearch.xpack.ml.ResultsIndexUpgradeService; -import org.junit.After; -import org.junit.Assert; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeedBuilder; -import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; -import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.indexDocs; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.core.Is.is; - -public class MlUpgradeIT extends MlNativeAutodetectIntegTestCase { - - @After - public void cleanup() throws Exception { - cleanUp(); - } - - public void testMigrationWhenItIsNotNecessary() throws Exception { - String jobId1 = "no-migration-test1"; - String jobId2 = "no-migration-test2"; - String jobId3 = "no-migration-test3"; - - String dataIndex = createDataIndex().v2(); - List jobs = createJobsWithData(jobId1, jobId2, jobId3, dataIndex); - Job job1 = jobs.get(0); - Job job2 = jobs.get(1); - Job job3 = jobs.get(2); - - String job1Index = job1.getResultsIndexName(); - String job2Index = job2.getResultsIndexName(); - String job3Index = job3.getResultsIndexName(); - - assertThat(indexExists(job1Index), is(true)); - assertThat(indexExists(job2Index), is(true)); - assertThat(indexExists(job3Index), is(true)); - - long job1Total = getTotalDocCount(job1Index); - long job2Total = getTotalDocCount(job2Index); - long job3Total = getTotalDocCount(job3Index); - - AcknowledgedResponse resp = ESIntegTestCase.client().execute(MlUpgradeAction.INSTANCE, - new MlUpgradeAction.Request()).actionGet(); - assertThat(resp.isAcknowledged(), is(true)); - - // Migration should have done nothing - assertThat(indexExists(job1Index), is(true)); - assertThat(indexExists(job2Index), is(true)); - assertThat(indexExists(job3Index), is(true)); - - assertThat(getTotalDocCount(job1Index), equalTo(job1Total)); - assertThat(getTotalDocCount(job2Index), equalTo(job2Total)); - assertThat(getTotalDocCount(job3Index), 
equalTo(job3Total)); - - ClusterState state = admin().cluster().state(new ClusterStateRequest()).actionGet().getState(); - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); - String[] indices = indexNameExpressionResolver.concreteIndexNames(state, - IndicesOptions.strictExpandOpenAndForbidClosed(), - AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"); - - // Our backing index size should be two as we have a shared and custom index - assertThat(indices.length, equalTo(2)); - } - - public void testMigration() throws Exception { - String jobId1 = "migration-test1"; - String jobId2 = "migration-test2"; - String jobId3 = "migration-test3"; - - String dataIndex = createDataIndex().v2(); - List jobs = createJobsWithData(jobId1, jobId2, jobId3, dataIndex); - Job job1 = jobs.get(0); - Job job2 = jobs.get(1); - Job job3 = jobs.get(2); - - String job1Index = job1.getResultsIndexName(); - String job2Index = job2.getResultsIndexName(); - String job3Index = job3.getResultsIndexName(); - - assertThat(indexExists(job1Index), is(true)); - assertThat(indexExists(job2Index), is(true)); - assertThat(indexExists(job3Index), is(true)); - - long job1Total = getJobResultsCount(job1.getId()); - long job2Total = getJobResultsCount(job2.getId()); - long job3Total = getJobResultsCount(job3.getId()); - - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); - - ResultsIndexUpgradeService resultsIndexUpgradeService = new ResultsIndexUpgradeService(indexNameExpressionResolver, - ThreadPool.Names.SAME, - indexMetaData -> true); - - PlainActionFuture future = PlainActionFuture.newFuture(); - - resultsIndexUpgradeService.upgrade(ESIntegTestCase.client(), - new MlUpgradeAction.Request(), - ESIntegTestCase.client().admin().cluster().prepareState().get().getState(), - future); - - AcknowledgedResponse response = future.get(); - assertThat(response.isAcknowledged(), is(true)); - - assertThat(indexExists(job1Index), is(false)); - assertThat(indexExists(job2Index), is(false)); - assertThat(indexExists(job3Index), is(false)); - - ClusterState state = admin().cluster().state(new ClusterStateRequest()).actionGet().getState(); - String[] indices = indexNameExpressionResolver.concreteIndexNames(state, - IndicesOptions.strictExpandOpenAndForbidClosed(), - AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"); - - // Our backing index size should be four as we have a shared and custom index and upgrading doubles the number of indices - Assert.assertThat(indices.length, equalTo(4)); - - refresh(indices); - assertThat(getJobResultsCount(job1.getId()), equalTo(job1Total)); - assertThat(getJobResultsCount(job2.getId()), equalTo(job2Total)); - assertThat(getJobResultsCount(job3.getId()), equalTo(job3Total)); - - - // WE should still be able to write, and the aliases should allow to read from the appropriate indices - postDataToJob(jobId1); - postDataToJob(jobId2); - postDataToJob(jobId3); - // We should also be able to create new jobs and old jobs should be unaffected. 
- String jobId4 = "migration-test4"; - Job job4 = createAndOpenJobAndStartDataFeedWithData(jobId4, dataIndex, false); - waitUntilJobIsClosed(jobId4); - - indices = indexNameExpressionResolver.concreteIndexNames(state, - IndicesOptions.strictExpandOpenAndForbidClosed(), - AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"); - refresh(indices); - - long newJob1Total = getJobResultsCount(job1.getId()); - assertThat(newJob1Total, greaterThan(job1Total)); - - long newJob2Total = getJobResultsCount(job2.getId()); - assertThat(newJob2Total, greaterThan(job2Total)); - - long newJob3Total = getJobResultsCount(job3.getId()); - assertThat(newJob3Total, greaterThan(job3Total)); - - assertThat(getJobResultsCount(jobId4), greaterThan(0L)); - assertThat(getJobResultsCount(jobId1), equalTo(newJob1Total)); - assertThat(getJobResultsCount(jobId2), equalTo(newJob2Total)); - assertThat(getJobResultsCount(jobId3), equalTo(newJob3Total)); - } - - //I think this test name could be a little bit longer.... - public void testMigrationWithManuallyCreatedIndexThatNeedsMigrating() throws Exception { - String jobId1 = "migration-failure-test1"; - String jobId2 = "migration-failure-test2"; - String jobId3 = "migration-failure-test3"; - - String dataIndex = createDataIndex().v2(); - List jobs = createJobsWithData(jobId1, jobId2, jobId3, dataIndex); - Job job1 = jobs.get(0); - Job job2 = jobs.get(1); - Job job3 = jobs.get(2); - - String job1Index = job1.getResultsIndexName(); - String job2Index = job2.getResultsIndexName(); - String job3Index = job3.getResultsIndexName(); - - // This index name should match one of the automatically created migration indices - String manuallyCreatedIndex = job1Index + "-" + Version.CURRENT.major; - client().admin().indices().prepareCreate(manuallyCreatedIndex).execute().actionGet(); - - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); - - ResultsIndexUpgradeService resultsIndexUpgradeService = new ResultsIndexUpgradeService(indexNameExpressionResolver, - ThreadPool.Names.SAME, - indexMetaData -> true); //indicates that this manually created index needs migrated - - resultsIndexUpgradeService.upgrade(ESIntegTestCase.client(), - new MlUpgradeAction.Request(), - ESIntegTestCase.client().admin().cluster().prepareState().get().getState(), - ActionListener.wrap( - resp -> fail(), - exception -> { - assertThat(exception, instanceOf(IllegalStateException.class)); - assertThat(exception.getMessage(), - equalTo("Index [" + manuallyCreatedIndex + "] already exists and is not the current version.")); - } - )); - } - - public void testMigrationWithExistingIndexWithData() throws Exception { - String jobId1 = "partial-migration-test1"; - String jobId2 = "partial-migration-test2"; - String jobId3 = "partial-migration-test3"; - - String dataIndex = createDataIndex().v2(); - List jobs = createJobsWithData(jobId1, jobId2, jobId3, dataIndex); - Job job1 = jobs.get(0); - Job job2 = jobs.get(1); - Job job3 = jobs.get(2); - - String job1Index = job1.getResultsIndexName(); - String job2Index = job2.getResultsIndexName(); - String job3Index = job3.getResultsIndexName(); - - assertThat(indexExists(job1Index), is(true)); - assertThat(indexExists(job2Index), is(true)); - assertThat(indexExists(job3Index), is(true)); - - long job1Total = getJobResultsCount(job1.getId()); - long job2Total = getJobResultsCount(job2.getId()); - long job3Total = getJobResultsCount(job3.getId()); - - //lets manually create a READ index with reindexed data already - // Should still get aliased 
appropriately without any additional/duplicate data. - String alreadyMigratedIndex = job1Index + "-" + Version.CURRENT.major + "r"; - ReindexRequest reindexRequest = new ReindexRequest(); - reindexRequest.setSourceIndices(job1Index); - reindexRequest.setDestIndex(alreadyMigratedIndex); - client().execute(ReindexAction.INSTANCE, reindexRequest).actionGet(); - - //New write index as well, should still get aliased appropriately - String alreadyMigratedWriteIndex = job1Index + "-" + Version.CURRENT.major; - client().admin().indices().prepareCreate(alreadyMigratedWriteIndex).execute().actionGet(); - - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); - - ResultsIndexUpgradeService resultsIndexUpgradeService = new ResultsIndexUpgradeService(indexNameExpressionResolver, - ThreadPool.Names.SAME, - //indicates that this manually created index is already migrated and should not be included in our migration steps - indexMetaData -> !(indexMetaData.getIndex().getName().equals(alreadyMigratedIndex) || - indexMetaData.getIndex().getName().equals(alreadyMigratedWriteIndex))); - - PlainActionFuture future = PlainActionFuture.newFuture(); - - resultsIndexUpgradeService.upgrade(ESIntegTestCase.client(), - new MlUpgradeAction.Request(), - ESIntegTestCase.client().admin().cluster().prepareState().get().getState(), - future); - - AcknowledgedResponse response = future.get(); - assertThat(response.isAcknowledged(), is(true)); - - assertThat(indexExists(job1Index), is(false)); - assertThat(indexExists(job2Index), is(false)); - assertThat(indexExists(job3Index), is(false)); - - ClusterState state = admin().cluster().state(new ClusterStateRequest()).actionGet().getState(); - String[] indices = indexNameExpressionResolver.concreteIndexNames(state, - IndicesOptions.strictExpandOpenAndForbidClosed(), - AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"); - - // Our backing index size should be four as we have a shared and custom index and upgrading doubles the number of indices - Assert.assertThat(indices.length, equalTo(4)); - refresh(indices); - - assertThat(getJobResultsCount(job1.getId()), equalTo(job1Total)); - assertThat(getJobResultsCount(job2.getId()), equalTo(job2Total)); - assertThat(getJobResultsCount(job3.getId()), equalTo(job3Total)); - - // WE should still be able to write, and the aliases should allow to read from the appropriate indices - postDataToJob(jobId1); - postDataToJob(jobId2); - postDataToJob(jobId3); - - refresh(indices); - - long newJob1Total = getJobResultsCount(job1.getId()); - assertThat(newJob1Total, greaterThan(job1Total)); - - long newJob2Total = getJobResultsCount(job2.getId()); - assertThat(newJob2Total, greaterThan(job2Total)); - - long newJob3Total = getJobResultsCount(job3.getId()); - assertThat(newJob3Total, greaterThan(job3Total)); - } - - private long getTotalDocCount(String indexName) { - SearchResponse searchResponse = ESIntegTestCase.client().prepareSearch(indexName) - .setSize(10_000) - .setTrackTotalHits(true) - .setQuery(QueryBuilders.matchAllQuery()) - .execute().actionGet(); - return searchResponse.getHits().getTotalHits().value; - } - - private long getJobResultsCount(String jobId) { - String index = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + jobId; - return getTotalDocCount(index); - } - - private void postDataToJob(String jobId) throws Exception { - openJob(jobId); - ESTestCase.assertBusy(() -> Assert.assertEquals(getJobStats(jobId).get(0).getState(), JobState.OPENED)); - startDatafeed(jobId + "-datafeed", 0L, 
System.currentTimeMillis()); - waitUntilJobIsClosed(jobId); - } - - private Job createAndOpenJobAndStartDataFeedWithData(String jobId, String dataIndex, boolean isCustom) throws Exception { - Job.Builder jobbuilder = createScheduledJob(jobId); - if (isCustom) { - jobbuilder.setResultsIndexName(jobId); - } - registerJob(jobbuilder); - - Job job = putJob(jobbuilder).getResponse(); - - openJob(job.getId()); - ESTestCase.assertBusy(() -> Assert.assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); - - DatafeedConfig.Builder builder = createDatafeedBuilder(job.getId() + "-datafeed", - job.getId(), - Collections.singletonList(dataIndex)); - builder.setQueryDelay(TimeValue.timeValueSeconds(5)); - builder.setFrequency(TimeValue.timeValueSeconds(5)); - DatafeedConfig datafeedConfig = builder.build(); - registerDatafeed(datafeedConfig); - putDatafeed(datafeedConfig); - startDatafeed(datafeedConfig.getId(), 0L, System.currentTimeMillis()); - waitUntilJobIsClosed(jobId); - return job; - } - - private Tuple createDataIndex() { - ESIntegTestCase.client().admin().indices().prepareCreate("data-for-migration-1") - .addMapping("type", "time", "type=date") - .get(); - long numDocs = ESTestCase.randomIntBetween(32, 512); - long now = System.currentTimeMillis(); - long oneWeekAgo = now - 604800000; - long twoWeeksAgo = oneWeekAgo - 604800000; - indexDocs(logger, "data-for-migration-1", numDocs, twoWeeksAgo, oneWeekAgo); - return new Tuple<>(numDocs, "data-for-migration-1"); - } - - private List createJobsWithData(String sharedJobId1, String sharedJobId2, String customJobId, String dataIndex) throws Exception { - - Job job1 = createAndOpenJobAndStartDataFeedWithData(sharedJobId1, dataIndex, false); - Job job2 = createAndOpenJobAndStartDataFeedWithData(sharedJobId2, dataIndex, false); - Job job3 = createAndOpenJobAndStartDataFeedWithData(customJobId, dataIndex, true); - - return Arrays.asList(job1, job2, job3); - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 43674d42a56e6..39316389b0496 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -96,7 +96,6 @@ import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; -import org.elasticsearch.xpack.core.ml.action.MlUpgradeAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; @@ -152,7 +151,6 @@ import org.elasticsearch.xpack.ml.action.TransportPutDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportPutFilterAction; import org.elasticsearch.xpack.ml.action.TransportPutJobAction; -import org.elasticsearch.xpack.ml.action.TransportMlUpgradeAction; import org.elasticsearch.xpack.ml.action.TransportRevertModelSnapshotAction; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportStopDatafeedAction; @@ -232,7 +230,6 @@ import org.elasticsearch.xpack.ml.rest.results.RestGetInfluencersAction; import org.elasticsearch.xpack.ml.rest.results.RestGetOverallBucketsAction; import 
org.elasticsearch.xpack.ml.rest.results.RestGetRecordsAction; -import org.elasticsearch.xpack.ml.rest.results.RestUpgradeMlAction; import org.elasticsearch.xpack.ml.rest.validate.RestValidateDetectorAction; import org.elasticsearch.xpack.ml.rest.validate.RestValidateJobConfigAction; @@ -545,8 +542,7 @@ public List getRestHandlers(Settings settings, RestController restC new RestPutCalendarJobAction(settings, restController), new RestGetCalendarEventsAction(settings, restController), new RestPostCalendarEventAction(settings, restController), - new RestFindFileStructureAction(settings, restController), - new RestUpgradeMlAction(settings, restController) + new RestFindFileStructureAction(settings, restController) ); } @@ -604,8 +600,7 @@ public List getRestHandlers(Settings settings, RestController restC new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class), new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class), new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class), - new ActionHandler<>(FindFileStructureAction.INSTANCE, TransportFindFileStructureAction.class), - new ActionHandler<>(MlUpgradeAction.INSTANCE, TransportMlUpgradeAction.class) + new ActionHandler<>(FindFileStructureAction.INSTANCE, TransportFindFileStructureAction.class) ); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/ResultsIndexUpgradeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/ResultsIndexUpgradeService.java deleted file mode 100644 index ccbaed13feca0..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/ResultsIndexUpgradeService.java +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.ReindexAction; -import org.elasticsearch.index.reindex.ReindexRequest; -import org.elasticsearch.index.reindex.ScrollableHitSource; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.ml.action.MlUpgradeAction; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; -import org.elasticsearch.xpack.ml.utils.TypedChainTaskExecutor; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; - -/** - * ML Job results index upgrade service - */ -public class ResultsIndexUpgradeService { - - private static final Logger logger = LogManager.getLogger(ResultsIndexUpgradeService.class); - - // Adjust the following constants as necessary for various versions and backports. 
- private static final int INDEX_VERSION = Version.CURRENT.major; - private static final Version MIN_REQUIRED_VERSION = Version.CURRENT.minimumCompatibilityVersion(); - - private final IndexNameExpressionResolver indexNameExpressionResolver; - private final Predicate shouldUpgrade; - private final String executor; - - /** - * Construct a new upgrade service - * - * @param indexNameExpressionResolver Index expression resolver for the request - * @param executor Where to execute client calls - * @param shouldUpgrade Given IndexMetadata indicate if it should be upgraded or not - * {@code true} indicates that it SHOULD upgrade - */ - public ResultsIndexUpgradeService(IndexNameExpressionResolver indexNameExpressionResolver, - String executor, - Predicate shouldUpgrade) { - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.shouldUpgrade = shouldUpgrade; - this.executor = executor; - } - - public static boolean wasIndexCreatedInCurrentMajorVersion(IndexMetaData indexMetaData) { - return indexMetaData.getCreationVersion().major == INDEX_VERSION; - } - - /** - * There are two reasons for these indices to exist: - * 1. The upgrade process has ran before and either failed for some reason, or the end user is simply running it again. - * Either way, it should be ok to proceed as this action SHOULD be idempotent, - * unless the shouldUpgrade predicate is poorly formed - * 2. This index was created manually by the user. If the index was created manually and actually needs upgrading, then - * we consider the "new index" to be invalid as the passed predicate indicates that it still needs upgrading. - * - * @param metaData Cluster metadata - * @param newIndexName The index to check - * @param shouldUpgrade Should be index be upgraded - * @return {@code true} if the "new index" is valid - */ - private static boolean validNewIndex(MetaData metaData, String newIndexName, Predicate shouldUpgrade) { - return (metaData.hasIndex(newIndexName) && shouldUpgrade.test(metaData.index(newIndexName))) == false; - } - - private static void validateMinNodeVersion(ClusterState clusterState) { - if (clusterState.nodes().getMinNodeVersion().before(MIN_REQUIRED_VERSION)) { - throw new IllegalStateException("All nodes should have at least version [" + MIN_REQUIRED_VERSION + "] to upgrade"); - } - } - - // This method copies the behavior of the normal {index}/_upgrade rest response handler - private static Tuple getStatusAndCause(BulkByScrollResponse response) { - /* - * Return the highest numbered rest status under the assumption that higher numbered statuses are "more error" - * and thus more interesting to the user. - */ - RestStatus status = RestStatus.OK; - Throwable cause = null; - if (response.isTimedOut()) { - status = RestStatus.REQUEST_TIMEOUT; - cause = new ElasticsearchTimeoutException("Reindex request timed out"); - } - for (BulkItemResponse.Failure failure : response.getBulkFailures()) { - if (failure.getStatus().getStatus() > status.getStatus()) { - status = failure.getStatus(); - cause = failure.getCause(); - } - } - for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { - RestStatus failureStatus = ExceptionsHelper.status(failure.getReason()); - if (failureStatus.getStatus() > status.getStatus()) { - status = failureStatus; - cause = failure.getReason(); - } - } - return new Tuple<>(status, cause); - } - - /** - * Upgrade the indices given in the request. 
- * - * @param client The client to use when making calls - * @param request The upgrade request - * @param state The current cluster state - * @param listener The listener to alert when actions have completed - */ - public void upgrade(Client client, MlUpgradeAction.Request request, ClusterState state, - ActionListener listener) { - try { - validateMinNodeVersion(state); - String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices()); - MetaData metaData = state.getMetaData(); - - List indicesToUpgrade = Arrays.stream(concreteIndices) - .filter(indexName -> shouldUpgrade.test(metaData.index(indexName))) - .collect(Collectors.toList()); - - // All the internal indices are up to date - if (indicesToUpgrade.isEmpty()) { - listener.onResponse(new AcknowledgedResponse(true)); - return; - } - - IndexNameAndAliasProvider indexNameAndAliasProvider = new IndexNameAndAliasProvider(indicesToUpgrade, metaData); - Exception validationException = indexNameAndAliasProvider.validate(metaData, shouldUpgrade); - if (validationException != null) { - listener.onFailure(validationException); - return; - } - - // <7> Now that we have deleted the old indices, we are complete, alert the user - ActionListener deleteIndicesListener = ActionListener.wrap( - listener::onResponse, - error -> { - String msg = "Failed to delete old indices: " + Strings.collectionToCommaDelimitedString(indicesToUpgrade); - logger.error(msg, error); - listener.onFailure(new ElasticsearchException(msg, error)); - } - ); - - // <6> Now that aliases are moved, need to delete the old indices - ActionListener readAliasListener = ActionListener.wrap( - resp -> deleteOldIndices(client, indicesToUpgrade, deleteIndicesListener), - error -> { - String msg = "Failed adjusting aliases from old indices to new."; - logger.error(msg, error); - listener.onFailure(new ElasticsearchException(msg, error)); - } - ); - - // <5> Documents are now reindexed, time to move read aliases - ActionListener reindexListener = ActionListener.wrap( - resp -> - // Need to make indices writable again so that the aliases can be removed from them - removeReadOnlyBlock(client, indicesToUpgrade, - ActionListener.wrap( - rrob -> adjustAliases(client, - indexNameAndAliasProvider.oldIndicesWithReadAliases(), - indexNameAndAliasProvider.newReadIndicesWithReadAliases(), - readAliasListener), - rrobFailure -> { - String msg = "Failed making old indices writable again so that aliases can be moved."; - logger.error(msg, rrobFailure); - listener.onFailure(new ElasticsearchException(msg, rrobFailure)); - }) - ), - error -> { - logger.error("Failed to reindex old read-only indices", error); - removeReadOnlyBlock(client, indicesToUpgrade, ActionListener.wrap( - empty -> listener.onFailure(error), - removeReadOnlyBlockError -> { - String msg = "Failed making old indices read/write again after failing to reindex: " + error.getMessage(); - logger.error(msg, removeReadOnlyBlockError); - listener.onFailure(new ElasticsearchException(msg, removeReadOnlyBlockError)); - } - )); - } - ); - - // <4> Old indexes are now readOnly, Time to reindex - ActionListener readOnlyListener = ActionListener.wrap( - ack -> reindexOldReadIndicesToNewIndices(client, indexNameAndAliasProvider.needsReindex(), request, reindexListener), - listener::onFailure - ); - - // <3> Set old indices to readOnly - ActionListener writeAliasesMovedListener = ActionListener.wrap( - resp -> setReadOnlyBlock(client, indicesToUpgrade, readOnlyListener), - 
listener::onFailure - ); - - // <2> Move write index alias to new write indices - ActionListener createWriteIndicesAndSetReadAliasListener = ActionListener.wrap( - resp -> adjustAliases(client, - indexNameAndAliasProvider.oldIndicesWithWriteAliases(), - indexNameAndAliasProvider.newWriteIndicesWithWriteAliases(), - writeAliasesMovedListener), - listener::onFailure - ); - - // <1> Create the new write indices and set the read aliases to include them - createNewWriteIndicesIfNecessary(client, metaData, indexNameAndAliasProvider.newWriteIndices(), - ActionListener.wrap( - indicesCreated -> adjustAliases(client, - Collections.emptyMap(), - indexNameAndAliasProvider.newWriteIndicesWithReadAliases(), - createWriteIndicesAndSetReadAliasListener), - listener::onFailure - )); - - } catch (Exception e) { - listener.onFailure(e); - } - - } - - private void createNewWriteIndicesIfNecessary(Client client, - MetaData metaData, - Collection newWriteIndices, - ActionListener createIndexListener) { - TypedChainTaskExecutor chainTaskExecutor = - new TypedChainTaskExecutor<>( - client.threadPool().executor(executor), - (createIndexResponse -> true), //We always want to complete all our tasks - (exception -> - // Short circuit execution IF the exception is NOT a ResourceAlreadyExistsException - // This should be rare, as it requires the index to be created between our previous check and this exception - exception instanceof ResourceAlreadyExistsException == false - )); - newWriteIndices.forEach((index) -> { - // If the index already exists, don't try and created it - // We have already verified that IF this index exists, that it does not require upgrading - // So, if it was created between that check and this one, we can assume it is the correct version as it was JUST created - if (metaData.hasIndex(index) == false) { - CreateIndexRequest request = new CreateIndexRequest(index); - chainTaskExecutor.add(listener -> - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - request, - listener, - client.admin().indices()::create)); - } - }); - - chainTaskExecutor.execute(ActionListener.wrap( - createIndexResponses -> createIndexListener.onResponse(true), - createIndexListener::onFailure - )); - } - - /** - * Makes the indices readonly if it's not set as a readonly yet - */ - private void setReadOnlyBlock(Client client, List indices, ActionListener listener) { - Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), true).build(); - UpdateSettingsRequest request = new UpdateSettingsRequest(indices.toArray(new String[0])); - request.settings(settings); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - request, - listener, - client.admin().indices()::updateSettings); - } - - private void removeReadOnlyBlock(Client client, List indices, - ActionListener listener) { - Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), false).build(); - UpdateSettingsRequest request = new UpdateSettingsRequest(indices.toArray(new String[0])); - request.settings(settings); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - request, - listener, - client.admin().indices()::updateSettings); - } - - private void reindexOldReadIndicesToNewIndices(Client client, - Map reindexIndices, - MlUpgradeAction.Request request, - ActionListener listener) { - TypedChainTaskExecutor chainTaskExecutor = - new TypedChainTaskExecutor<>( - client.threadPool().executor(executor), - (createIndexResponse) 
-> { // If there are errors in the reindex, we should stop - Tuple status = getStatusAndCause(createIndexResponse); - return status.v1().equals(RestStatus.OK); - }, - (exception -> true)); // Short circuit and call onFailure for any exception - - List newIndices = new ArrayList<>(reindexIndices.size()); - reindexIndices.forEach((oldIndex, newIndex) -> { - ReindexRequest reindexRequest = new ReindexRequest(); - reindexRequest.setSourceBatchSize(request.getReindexBatchSize()); - reindexRequest.setSourceIndices(oldIndex); - reindexRequest.setDestIndex(newIndex); - reindexRequest.setSourceDocTypes(ElasticsearchMappings.DOC_TYPE); - reindexRequest.setDestDocType(ElasticsearchMappings.DOC_TYPE); - // Don't worry if these indices already exist, we validated settings.index.created.version earlier - reindexRequest.setAbortOnVersionConflict(false); - // If the document exists already in the new index, don't want to update or overwrite as we are pulling from "old data" - reindexRequest.setDestOpType(DocWriteRequest.OpType.CREATE.getLowercase()); - newIndices.add(newIndex); - chainTaskExecutor.add(chainedListener -> - executeAsyncWithOrigin(client, - ML_ORIGIN, - ReindexAction.INSTANCE, - reindexRequest, - chainedListener)); - }); - - chainTaskExecutor.execute(ActionListener.wrap( - bulkScrollingResponses -> { - BulkByScrollResponse response = bulkScrollingResponses.get(bulkScrollingResponses.size() - 1); - Tuple status = getStatusAndCause(response); - if (status.v1().equals(RestStatus.OK)) { - listener.onResponse(true); - } else { - logger.error("Failed to reindex old results indices.", status.v2()); - listener.onFailure(new ElasticsearchException("Failed to reindex old results indices.",status.v2())); - } - }, - failure -> { - List createdIndices = newIndices.subList(0, chainTaskExecutor.getCollectedResponses().size()); - logger.error( - "Failed to reindex all old read indices. 
Successfully reindexed: [" + - Strings.collectionToCommaDelimitedString(createdIndices) + "]", - failure); - listener.onFailure(failure); - } - )); - - } - - private void deleteOldIndices(Client client, - List oldIndices, - ActionListener deleteIndicesListener) { - DeleteIndexRequest request = new DeleteIndexRequest(oldIndices.toArray(new String[0])); - request.indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - request, - deleteIndicesListener, - client.admin().indices()::delete); - } - - private void adjustAliases(Client client, - Map> oldAliases, - Map> newAliases, - ActionListener indicesAliasListener) { - IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - oldAliases.forEach((oldIndex, aliases) -> - { - if (aliases.isEmpty() == false) { //if the aliases are empty, that means there are none to remove - indicesAliasesRequest.addAliasAction(IndicesAliasesRequest - .AliasActions - .remove() - .index(oldIndex) - .aliases(aliases.stream().map(Alias::name).toArray(String[]::new))); - } - } - ); - newAliases.forEach((newIndex, aliases) -> - aliases.forEach(alias -> { - IndicesAliasesRequest.AliasActions action = IndicesAliasesRequest.AliasActions.add().index(newIndex); - if (alias.filter() != null) { - action.filter(alias.filter()); - } - action.alias(alias.name()); - indicesAliasesRequest.addAliasAction(action); - }) - ); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ML_ORIGIN, - indicesAliasesRequest, - indicesAliasListener, - client.admin().indices()::aliases); - } - - - private static class IndexNameAndAliasProvider { - - private final List oldIndices; - private final Map> writeAliases = new HashMap<>(); - private final Map> readAliases = new HashMap<>(); - - private IndexNameAndAliasProvider(List oldIndices, MetaData metaData) { - this.oldIndices = oldIndices; - oldIndices.forEach(index -> { - IndexMetaData indexMetaData = metaData.index(index); - List writes = new ArrayList<>(); - List reads = new ArrayList<>(); - indexMetaData.getAliases().forEach(aliasCursor -> { - Alias alias = new Alias(aliasCursor.value.alias()); - if (aliasCursor.value.filteringRequired()) { - alias.filter(aliasCursor.value.getFilter().string()); //Set the read alias jobId filter - } - if (alias.name().contains(".write-")) { - writes.add(alias); - } else { - reads.add(alias); - } - }); - - writeAliases.put(index, writes); - readAliases.put(index, reads); - }); - } - - private Exception validate(MetaData metaData, Predicate shouldUpgrade) { - for (String index : oldIndices) { - String newWriteName = newWriteName(index); - // If the "new" indices exist, either they were created from a previous run of the upgrade process or the end user - if (validNewIndex(metaData, newWriteName, shouldUpgrade) == false) { - return new IllegalStateException("Index [" + newWriteName + "] already exists and is not the current version."); - } - - String newReadName = newReadName(index); - if (validNewIndex(metaData, newReadName, shouldUpgrade) == false) { - return new IllegalStateException("Index [" + newReadName + "] already exists and is not the current version."); - } - } - return null; - } - - private String newReadName(String oldIndexName) { - return oldIndexName + "-" + INDEX_VERSION + "r"; - } - - private String newWriteName(String oldIndexName) { - return oldIndexName + "-" + INDEX_VERSION; - } - - private List newWriteIndices() { - return 
oldIndices.stream().map(this::newWriteName).collect(Collectors.toList()); - } - - private List readAliases(String oldIndex) { - return readAliases.get(oldIndex); - } - - private List writeAliases(String oldIndex) { - return writeAliases.get(oldIndex); - } - - private Map> newWriteIndicesWithReadAliases() { - return oldIndices.stream().collect(Collectors.toMap(this::newWriteName, this::readAliases)); - } - - private Map> oldIndicesWithWriteAliases() { - return writeAliases; - } - - private Map> newWriteIndicesWithWriteAliases() { - return oldIndices.stream().collect(Collectors.toMap(this::newWriteName, this::writeAliases)); - } - - private Map> oldIndicesWithReadAliases() { - return readAliases; - } - - private Map> newReadIndicesWithReadAliases() { - return oldIndices.stream().collect(Collectors.toMap(this::newReadName, this::readAliases)); - } - - private Map needsReindex() { - return oldIndices.stream().collect(Collectors.toMap(Function.identity(), this::newReadName)); - } - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlUpgradeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlUpgradeAction.java deleted file mode 100644 index 2b676277aa690..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlUpgradeAction.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.action; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.ParentTaskAssigningClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.action.MlUpgradeAction; -import org.elasticsearch.xpack.ml.ResultsIndexUpgradeService; - -import static org.elasticsearch.xpack.ml.ResultsIndexUpgradeService.wasIndexCreatedInCurrentMajorVersion; - -public class TransportMlUpgradeAction - extends TransportMasterNodeReadAction { - - private final Client client; - private final ResultsIndexUpgradeService resultsIndexUpgradeService; - - @Inject - public TransportMlUpgradeAction(TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, Client client, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(MlUpgradeAction.NAME, transportService, clusterService, threadPool, - actionFilters, MlUpgradeAction.Request::new, indexNameExpressionResolver); - this.client = client; - this.resultsIndexUpgradeService = new ResultsIndexUpgradeService(indexNameExpressionResolver, - executor(), - indexMetadata -> 
wasIndexCreatedInCurrentMajorVersion(indexMetadata) == false); - } - - @Override - protected void masterOperation(Task task, MlUpgradeAction.Request request, ClusterState state, - ActionListener listener) { - TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId()); - ParentTaskAssigningClient parentAwareClient = new ParentTaskAssigningClient(client, taskId); - try { - resultsIndexUpgradeService.upgrade(parentAwareClient, request, state, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - protected final void masterOperation(MlUpgradeAction.Request request, ClusterState state, - ActionListener listener) { - throw new UnsupportedOperationException("the task parameter is required"); - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); - } - - @Override - protected ClusterBlockException checkBlock(MlUpgradeAction.Request request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestUpgradeMlAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestUpgradeMlAction.java deleted file mode 100644 index cad82ce325c27..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestUpgradeMlAction.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.rest.results; - -import org.apache.logging.log4j.LogManager; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.tasks.LoggingTaskListener; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.ml.action.MlUpgradeAction; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.POST; - -public class RestUpgradeMlAction extends BaseRestHandler { - - private static final DeprecationLogger deprecationLogger = - new DeprecationLogger(LogManager.getLogger(RestUpgradeMlAction.class)); - - public RestUpgradeMlAction(Settings settings, RestController controller) { - super(settings); - controller.registerWithDeprecatedHandler( - POST, - MachineLearning.BASE_PATH + "_upgrade", - this, - POST, - MachineLearning.PRE_V7_BASE_PATH + "_upgrade", - deprecationLogger); - } - - @Override - public String getName() { - return "xpack_ml_upgrade_action"; - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - MlUpgradeAction.Request parsedRequest = new MlUpgradeAction.Request(); - if (restRequest.hasContent()) { - 
XContentParser parser = restRequest.contentParser(); - parsedRequest = MlUpgradeAction.Request.PARSER.apply(parser, null); - } - final MlUpgradeAction.Request upgradeRequest = parsedRequest; - - if (restRequest.paramAsBoolean("wait_for_completion", false)) { - return channel -> client.execute(MlUpgradeAction.INSTANCE, upgradeRequest, new RestToXContentListener<>(channel)); - } else { - upgradeRequest.setShouldStoreResult(true); - - Task task = client.executeLocally(MlUpgradeAction.INSTANCE, upgradeRequest, LoggingTaskListener.instance()); - // Send task description id instead of waiting for the message - return channel -> { - try (XContentBuilder builder = channel.newBuilder()) { - builder.startObject(); - builder.field("task", client.getLocalNodeId() + ":" + task.getId()); - builder.endObject(); - channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); - } - }; - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.upgrade.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.upgrade.json deleted file mode 100644 index b67b125bb692a..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.upgrade.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "ml.upgrade": { - "documentation": "TODO", - "methods": [ "POST" ], - "url": { - "path": "/_ml/_upgrade", - "paths": [ "/_ml/_upgrade" ], - "params": { - "wait_for_completion": { - "type": "boolean", - "description": "Should this request wait until the operation has completed before returning", - "default": false - } - } - }, - "body": { - "description" : "Upgrade options", - "required" : false - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_upgrade.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_upgrade.yml deleted file mode 100644 index ee1f9f77f9325..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_upgrade.yml +++ /dev/null @@ -1,70 +0,0 @@ -setup: - - skip: - features: headers - - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - ml.put_job: - job_id: jobs-upgrade-results - body: > - { - "analysis_config" : { - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "data_description" : { - "format":"xcontent", - "time_field":"time" - } - } - - - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - Content-Type: application/json - index: - index: .ml-anomalies-jobs-upgrade-results - type: doc - id: "jobs-upgrade-results_1464739200000_1" - body: - { - "job_id": "jobs-upgrade-results", - "result_type": "bucket", - "timestamp": "2016-06-01T00:00:00Z", - "anomaly_score": 90.0, - "bucket_span":1 - } - - - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - indices.refresh: - index: .ml-anomalies-jobs-upgrade-results - ---- -"Upgrade results when there is nothing to upgrade": - - do: - ml.upgrade: - wait_for_completion: true - - - match: { acknowledged: true } - - - do: - indices.exists: - index: .ml-anomalies-shared - - - is_true: '' ---- -"Upgrade results when there is nothing to upgrade not waiting for results": - - do: - ml.upgrade: - wait_for_completion: false - - - match: {task: '/.+:\d+/'} - - set: {task: task} - - - do: - tasks.get: - wait_for_completion: true - task_id: $task - - match: {completed: true} - - match: {response.acknowledged: true} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_ml_results_upgrade.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_ml_results_upgrade.yml deleted file mode 100644 index 73478be65597e..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_ml_results_upgrade.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -"Verify jobs exist": - - do: - ml.get_jobs: - job_id: old-cluster-job-to-upgrade - - match: { count: 1 } - - - do: - ml.get_jobs: - job_id: old-cluster-job-to-upgrade-custom - - match: { count: 1 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_ml_results_upgrade.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_ml_results_upgrade.yml deleted file mode 100644 index d21b5e6def61d..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_ml_results_upgrade.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -"Put job on the old cluster and post some data": - - - do: - ml.put_job: - job_id: old-cluster-job-to-upgrade - body: > - { - "description":"Cluster upgrade", - "analysis_config" : { - "bucket_span": "60s", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "analysis_limits" : { - "model_memory_limit": "50mb" - }, - "data_description" : { - "format":"xcontent", - "time_field":"time", - "time_format":"epoch" - } - } - - match: { job_id: old-cluster-job-to-upgrade } - - - do: - ml.open_job: - job_id: old-cluster-job-to-upgrade - - - do: - ml.post_data: - job_id: old-cluster-job-to-upgrade - body: - - airline: AAL - responsetime: 132.2046 - sourcetype: post-data-job - time: 1403481600 - - airline: JZA - responsetime: 990.4628 - sourcetype: post-data-job - time: 1403481700 - - match: { processed_record_count: 2 } - - - do: - ml.close_job: - job_id: old-cluster-job-to-upgrade - - - do: - ml.get_buckets: - job_id: old-cluster-job-to-upgrade - - match: { count: 1 } - -# Wait for indices to be fully allocated before -# killing the node - - do: - cluster.health: - index: [".ml-state", ".ml-anomalies-shared"] - wait_for_status: green - ---- -"Put job on the old cluster with a custom index": - - do: - ml.put_job: - job_id: old-cluster-job-to-upgrade-custom - body: > - { - "description":"Cluster upgrade", - "analysis_config" : { - "bucket_span": "60s", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "analysis_limits" : { - "model_memory_limit": "50mb" - }, - "data_description" : { - "format":"xcontent", - "time_field":"time", - "time_format":"epoch" - }, - "results_index_name": "old-cluster-job-to-upgrade-custom" - } - - match: { job_id: old-cluster-job-to-upgrade-custom } - - - do: - ml.open_job: - job_id: old-cluster-job-to-upgrade-custom - - - 
do: - ml.post_data: - job_id: old-cluster-job-to-upgrade-custom - body: - - airline: AAL - responsetime: 132.2046 - sourcetype: post-data-job - time: 1403481600 - - airline: JZA - responsetime: 990.4628 - sourcetype: post-data-job - time: 1403481700 - - airline: JZA - responsetime: 423.0000 - sourcetype: post-data-job - time: 1403481800 - - match: { processed_record_count: 3 } - - - do: - ml.close_job: - job_id: old-cluster-job-to-upgrade-custom - - - do: - ml.get_buckets: - job_id: old-cluster-job-to-upgrade-custom - - match: { count: 3 } - -# Wait for indices to be fully allocated before -# killing the node - - do: - cluster.health: - index: [".ml-state", ".ml-anomalies-old-cluster-job-to-upgrade-custom"] - wait_for_status: green - diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_ml_results_upgrade.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_ml_results_upgrade.yml deleted file mode 100644 index f049b9c073ad8..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_ml_results_upgrade.yml +++ /dev/null @@ -1,158 +0,0 @@ ---- -"Migrate results data to latest index binary version": - # Verify that all the results are there and the typical indices exist - - do: - ml.get_buckets: - job_id: old-cluster-job-to-upgrade - - match: { count: 1 } - - - do: - ml.get_buckets: - job_id: old-cluster-job-to-upgrade-custom - - match: { count: 3 } - - - do: - indices.exists: - index: .ml-anomalies-shared - - - is_true: '' - - - do: - indices.get_settings: - index: .ml-anomalies-shared - name: index.version.created - - - match: { \.ml-anomalies-shared.settings.index.version.created: '/6\d+/' } - - - do: - indices.exists: - index: .ml-anomalies-custom-old-cluster-job-to-upgrade-custom - - - is_true: '' - - # Do the upgrade - - do: - ml.upgrade: - wait_for_completion: true - - - match: { acknowledged: true } - - # Verify that old indices are gone - - do: - indices.exists: - index: .ml-anomalies-shared - - - is_false: '' - - - do: - indices.exists: - index: .ml-anomalies-custom-old-cluster-job-to-upgrade-custom - - - is_false: '' - - # Verify that results can still be retrieved - - - do: - indices.refresh: {} - - - do: - ml.get_buckets: - job_id: old-cluster-job-to-upgrade - - match: { count: 1 } - - - do: - ml.get_buckets: - job_id: old-cluster-job-to-upgrade-custom - - match: { count: 3 } - - # Verify the created version is correct - - - do: - indices.get_settings: - index: .ml-anomalies-old-cluster-job-to-upgrade - name: index.version.created - - match: { \.ml-anomalies-shared-7.settings.index.version.created: '/7\d+/' } - - match: { \.ml-anomalies-shared-7r.settings.index.version.created: '/7\d+/' } - - - do: - indices.get_settings: - index: .ml-anomalies-old-cluster-job-to-upgrade-custom - name: index.version.created - - match: { \.ml-anomalies-custom-old-cluster-job-to-upgrade-custom-7.settings.index.version.created: '/7\d+/' } - - match: { \.ml-anomalies-custom-old-cluster-job-to-upgrade-custom-7r.settings.index.version.created: '/7\d+/' } - - # Create a new job to verify that the .ml-anomalies-shared index gets created again without issues - - - do: - ml.put_job: - job_id: upgraded-cluster-job-should-not-upgrade - body: > - { - "description":"Cluster upgrade", - "analysis_config" : { - "bucket_span": "60s", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "analysis_limits" : { - "model_memory_limit": "50mb" - }, - 
"data_description" : { - "format":"xcontent", - "time_field":"time", - "time_format":"epoch" - } - } - - match: { job_id: upgraded-cluster-job-should-not-upgrade } - - - do: - ml.open_job: - job_id: upgraded-cluster-job-should-not-upgrade - - - do: - ml.post_data: - job_id: upgraded-cluster-job-should-not-upgrade - body: - - airline: AAL - responsetime: 132.2046 - sourcetype: post-data-job - time: 1403481600 - - airline: JZA - responsetime: 990.4628 - sourcetype: post-data-job - time: 1403481700 - - match: { processed_record_count: 2 } - - - do: - ml.close_job: - job_id: upgraded-cluster-job-should-not-upgrade - - - do: - ml.get_buckets: - job_id: upgraded-cluster-job-should-not-upgrade - - match: { count: 1 } - - - do: - indices.exists: - index: .ml-anomalies-shared - - - is_true: '' - - - do: - indices.get_settings: - index: .ml-anomalies-shared - name: index.version.created - - - match: { \.ml-anomalies-shared.settings.index.version.created: '/7\d+/' } - - # Do the upgrade Again as nothing needs upgraded now - - do: - ml.upgrade: - wait_for_completion: true - - - match: { acknowledged: true } - - - do: - indices.exists: - index: .ml-anomalies-shared - - - is_true: '' From eb7bf16427c6c9a07e248830afc53f6032cf30c0 Mon Sep 17 00:00:00 2001 From: Like Date: Sat, 26 Jan 2019 04:52:04 +0800 Subject: [PATCH 60/64] Migrate o.e.i.r.RecoveryState to Writeable (#37380) Relates to #34389 --- .../indices/recovery/RecoveryState.java | 267 +++++++++--------- .../indices/recovery/RecoveryTargetTests.java | 42 ++- 2 files changed, 155 insertions(+), 154 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 9013cfa202d83..1fed238f8ddf6 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -46,7 +47,7 @@ /** * Keeps track of state related to shard recovery. 
*/ -public class RecoveryState implements ToXContentFragment, Streamable { +public class RecoveryState implements ToXContentFragment, Streamable, Writeable { public enum Stage { INIT((byte) 0), @@ -102,20 +103,17 @@ public static Stage fromId(byte id) { private Stage stage; - private final Index index = new Index(); - private final Translog translog = new Translog(); - private final VerifyIndex verifyIndex = new VerifyIndex(); - private final Timer timer = new Timer(); + private final Index index; + private final Translog translog; + private final VerifyIndex verifyIndex; + private final Timer timer; private RecoverySource recoverySource; private ShardId shardId; @Nullable private DiscoveryNode sourceNode; private DiscoveryNode targetNode; - private boolean primary = false; - - private RecoveryState() { - } + private boolean primary; public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode) { assert shardRouting.initializing() : "only allow initializing shard routing to be recovered: " + shardRouting; @@ -128,9 +126,40 @@ public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nulla this.sourceNode = sourceNode; this.targetNode = targetNode; stage = Stage.INIT; + index = new Index(); + translog = new Translog(); + verifyIndex = new VerifyIndex(); + timer = new Timer(); timer.start(); } + public RecoveryState(StreamInput in) throws IOException { + timer = new Timer(in); + stage = Stage.fromId(in.readByte()); + shardId = ShardId.readShardId(in); + recoverySource = RecoverySource.readFrom(in); + targetNode = new DiscoveryNode(in); + sourceNode = in.readOptionalWriteable(DiscoveryNode::new); + index = new Index(in); + translog = new Translog(in); + verifyIndex = new VerifyIndex(in); + primary = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + timer.writeTo(out); + out.writeByte(stage.id()); + shardId.writeTo(out); + recoverySource.writeTo(out); + targetNode.writeTo(out); + out.writeOptionalWriteable(sourceNode); + index.writeTo(out); + translog.writeTo(out); + verifyIndex.writeTo(out); + out.writeBoolean(primary); + } + public ShardId getShardId() { return shardId; } @@ -223,37 +252,12 @@ public boolean getPrimary() { } public static RecoveryState readRecoveryState(StreamInput in) throws IOException { - RecoveryState recoveryState = new RecoveryState(); - recoveryState.readFrom(in); - return recoveryState; + return new RecoveryState(in); } @Override public synchronized void readFrom(StreamInput in) throws IOException { - timer.readFrom(in); - stage = Stage.fromId(in.readByte()); - shardId = ShardId.readShardId(in); - recoverySource = RecoverySource.readFrom(in); - targetNode = new DiscoveryNode(in); - sourceNode = in.readOptionalWriteable(DiscoveryNode::new); - index.readFrom(in); - translog.readFrom(in); - verifyIndex.readFrom(in); - primary = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - timer.writeTo(out); - out.writeByte(stage.id()); - shardId.writeTo(out); - recoverySource.writeTo(out); - targetNode.writeTo(out); - out.writeOptionalWriteable(sourceNode); - index.writeTo(out); - translog.writeTo(out); - verifyIndex.writeTo(out); - out.writeBoolean(primary); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -347,12 +351,31 @@ static final class Fields { static final String TARGET_THROTTLE_TIME_IN_MILLIS = "target_throttle_time_in_millis"; } - public static 
class Timer implements Streamable { + public static class Timer implements Writeable { protected long startTime = 0; protected long startNanoTime = 0; protected long time = -1; protected long stopTime = 0; + public Timer() { + } + + public Timer(StreamInput in) throws IOException { + startTime = in.readVLong(); + startNanoTime = in.readVLong(); + stopTime = in.readVLong(); + time = in.readVLong(); + } + + @Override + public synchronized void writeTo(StreamOutput out) throws IOException { + out.writeVLong(startTime); + out.writeVLong(startNanoTime); + out.writeVLong(stopTime); + // write a snapshot of current time, which is not per se the time field + out.writeVLong(time()); + } + public synchronized void start() { assert startTime == 0 : "already started"; startTime = System.currentTimeMillis(); @@ -394,29 +417,24 @@ public synchronized void reset() { stopTime = 0; } + } - @Override - public synchronized void readFrom(StreamInput in) throws IOException { - startTime = in.readVLong(); - startNanoTime = in.readVLong(); - stopTime = in.readVLong(); - time = in.readVLong(); - } + public static class VerifyIndex extends Timer implements ToXContentFragment, Writeable { + private volatile long checkIndexTime; - @Override - public synchronized void writeTo(StreamOutput out) throws IOException { - out.writeVLong(startTime); - out.writeVLong(startNanoTime); - out.writeVLong(stopTime); - // write a snapshot of current time, which is not per se the time field - out.writeVLong(time()); + public VerifyIndex() { } - } - - public static class VerifyIndex extends Timer implements ToXContentFragment, Streamable { - private volatile long checkIndexTime; + public VerifyIndex(StreamInput in) throws IOException { + super(in); + checkIndexTime = in.readVLong(); + } + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(checkIndexTime); + } public void reset() { super.reset(); @@ -431,18 +449,6 @@ public void checkIndexTime(long checkIndexTime) { this.checkIndexTime = checkIndexTime; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - checkIndexTime = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVLong(checkIndexTime); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.humanReadableField(Fields.CHECK_INDEX_TIME_IN_MILLIS, Fields.CHECK_INDEX_TIME, new TimeValue(checkIndexTime)); @@ -451,13 +457,31 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class Translog extends Timer implements ToXContentFragment, Streamable { + public static class Translog extends Timer implements ToXContentFragment, Writeable { public static final int UNKNOWN = -1; private int recovered; private int total = UNKNOWN; private int totalOnStart = UNKNOWN; + public Translog() { + } + + public Translog(StreamInput in) throws IOException { + super(in); + recovered = in.readVInt(); + total = in.readVInt(); + totalOnStart = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(recovered); + out.writeVInt(total); + out.writeVInt(totalOnStart); + } + public synchronized void reset() { super.reset(); recovered = 0; @@ -533,22 +557,6 @@ public synchronized float recoveredPercent() { return recovered * 100.0f / total; } - @Override - public void readFrom(StreamInput in) 
throws IOException { - super.readFrom(in); - recovered = in.readVInt(); - total = in.readVInt(); - totalOnStart = in.readVInt(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(recovered); - out.writeVInt(total); - out.writeVInt(totalOnStart); - } - @Override public synchronized XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.RECOVERED, recovered); @@ -560,7 +568,7 @@ public synchronized XContentBuilder toXContent(XContentBuilder builder, Params p } } - public static class File implements ToXContentObject, Streamable { + public static class File implements ToXContentObject, Writeable { private String name; private long length; private long recovered; @@ -576,6 +584,21 @@ public File(String name, long length, boolean reused) { this.reused = reused; } + public File(StreamInput in) throws IOException { + name = in.readString(); + length = in.readVLong(); + recovered = in.readVLong(); + reused = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeVLong(length); + out.writeVLong(recovered); + out.writeBoolean(reused); + } + void addRecoveredBytes(long bytes) { assert reused == false : "file is marked as reused, can't update recovered bytes"; assert bytes >= 0 : "can't recovered negative bytes. got [" + bytes + "]"; @@ -614,28 +637,6 @@ boolean fullyRecovered() { return reused == false && length == recovered; } - public static File readFile(StreamInput in) throws IOException { - File file = new File(); - file.readFrom(in); - return file; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - length = in.readVLong(); - recovered = in.readVLong(); - reused = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeVLong(length); - out.writeVLong(recovered); - out.writeBoolean(reused); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -671,7 +672,7 @@ public String toString() { } } - public static class Index extends Timer implements ToXContentFragment, Streamable { + public static class Index extends Timer implements ToXContentFragment, Writeable { private Map fileDetails = new HashMap<>(); @@ -681,6 +682,32 @@ public static class Index extends Timer implements ToXContentFragment, Streamabl private long sourceThrottlingInNanos = UNKNOWN; private long targetThrottleTimeInNanos = UNKNOWN; + public Index() { + } + + public Index(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + File file = new File(in); + fileDetails.put(file.name, file); + } + sourceThrottlingInNanos = in.readLong(); + targetThrottleTimeInNanos = in.readLong(); + } + + @Override + public synchronized void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + final File[] files = fileDetails.values().toArray(new File[0]); + out.writeVInt(files.length); + for (File file : files) { + file.writeTo(out); + } + out.writeLong(sourceThrottlingInNanos); + out.writeLong(targetThrottleTimeInNanos); + } + public synchronized List fileDetails() { return Collections.unmodifiableList(new ArrayList<>(fileDetails.values())); } @@ -883,30 +910,6 @@ public synchronized void updateVersion(long version) { this.version = version; } - @Override - public 
synchronized void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - File file = File.readFile(in); - fileDetails.put(file.name, file); - } - sourceThrottlingInNanos = in.readLong(); - targetThrottleTimeInNanos = in.readLong(); - } - - @Override - public synchronized void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - final File[] files = fileDetails.values().toArray(new File[0]); - out.writeVInt(files.length); - for (File file : files) { - file.writeTo(out); - } - out.writeLong(sourceThrottlingInNanos); - out.writeLong(targetThrottleTimeInNanos); - } - @Override public synchronized XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // stream size first, as it matters more and the files section can be long diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java index 7a65541cb5eaf..1c2b5331fef30 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState.File; import org.elasticsearch.indices.recovery.RecoveryState.Index; @@ -57,7 +57,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; public class RecoveryTargetTests extends ESTestCase { - abstract class Streamer extends Thread { + abstract class Streamer extends Thread { private T lastRead; private final AtomicBoolean shouldStop; private final T source; @@ -93,12 +93,10 @@ public T serializeDeserialize() throws IOException { } protected T deserialize(StreamInput in) throws IOException { - T obj = createObj(); - obj.readFrom(in); - return obj; + return createObj(in); } - abstract T createObj(); + abstract T createObj(StreamInput in) throws IOException; @Override public void run() { @@ -121,32 +119,32 @@ public void testTimers() throws Throwable { timer = new Timer(); streamer = new Streamer(stop, timer) { @Override - Timer createObj() { - return new Timer(); + Timer createObj(StreamInput in) throws IOException { + return new Timer(in); } }; } else if (randomBoolean()) { timer = new Index(); streamer = new Streamer(stop, timer) { @Override - Timer createObj() { - return new Index(); + Timer createObj(StreamInput in) throws IOException { + return new Index(in); } }; } else if (randomBoolean()) { timer = new VerifyIndex(); streamer = new Streamer(stop, timer) { @Override - Timer createObj() { - return new VerifyIndex(); + Timer createObj(StreamInput in) throws IOException { + return new VerifyIndex(in); } }; } else { timer = new Translog(); streamer = new Streamer(stop, timer) { @Override - Timer createObj() { - return new Translog(); + Timer createObj(StreamInput in) throws IOException { + return new Translog(in); } }; } @@ -256,8 +254,8 @@ public void testIndex() throws Throwable { Streamer backgroundReader = new Streamer(streamShouldStop, index) { @Override - Index createObj() { - return new Index(); + Index createObj(StreamInput in) throws IOException { + return new 
Index(in); } }; @@ -381,8 +379,8 @@ public void testTranslog() throws Throwable { AtomicBoolean stop = new AtomicBoolean(); Streamer streamer = new Streamer(stop, translog) { @Override - Translog createObj() { - return new Translog(); + Translog createObj(StreamInput in) throws IOException { + return new Translog(in); } }; @@ -458,8 +456,8 @@ public void testStart() throws IOException { AtomicBoolean stop = new AtomicBoolean(); Streamer streamer = new Streamer(stop, verifyIndex) { @Override - VerifyIndex createObj() { - return new VerifyIndex(); + VerifyIndex createObj(StreamInput in) throws IOException { + return new VerifyIndex(in); } }; @@ -508,8 +506,8 @@ public void testConcurrentModificationIndexFileDetailsMap() throws InterruptedEx final AtomicBoolean stop = new AtomicBoolean(false); Streamer readWriteIndex = new Streamer(stop, index) { @Override - Index createObj() { - return new Index(); + Index createObj(StreamInput in) throws IOException { + return new Index(in); } }; Thread modifyThread = new Thread() { From d1ff450edc4cca7a6d6b682f5320d20d0130c8cb Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Fri, 25 Jan 2019 23:29:10 +0200 Subject: [PATCH 61/64] SQL: Fix casting from date to numeric type to use millis (#37869) Previously casting from a DATE[TIME] type to a numeric (DOUBLE, LONG, INT, etc. used seconds instead of the epoch millis. Fixes: #37655 --- .../qa/src/main/resources/datetime.csv-spec | 42 +++++------ .../xpack/sql/type/DataTypeConversion.java | 2 +- .../sql/type/DataTypeConversionTests.java | 69 +++++++++++++++---- 3 files changed, 77 insertions(+), 36 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec index 39681e7118fc1..50df0f7dfe67e 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec @@ -131,32 +131,32 @@ SELECT CONVERT(birth_date, DOUBLE) AS date FROM test_emp GROUP BY date ORDER BY date:d --------------- null --5.631552E8 --5.586624E8 --5.56416E8 --5.539104E8 --5.517504E8 --5.492448E8 --5.406912E8 --5.371488E8 --5.359392E8 +-5.631552E11 +-5.586624E11 +-5.56416E11 +-5.539104E11 +-5.517504E11 +-5.492448E11 +-5.406912E11 +-5.371488E11 +-5.359392E11 ; castedDateTimeWithGroupBy2 -SELECT CAST(hire_date AS INTEGER) AS date FROM test_emp GROUP BY date ORDER BY date LIMIT 10; +SELECT CAST(hire_date AS LONG) AS date FROM test_emp GROUP BY date ORDER BY date LIMIT 10; - date:i + date:l --------------- -477532800 -478051200 -484790400 -489715200 -495763200 -498096000 -498614400 -501206400 -501292800 -501379200 +477532800000 +478051200000 +484790400000 +489715200000 +495763200000 +498096000000 +498614400000 +501206400000 +501292800000 +501379200000 ; dateTimeAggByIsoDayOfWeekWithFilter diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index a578c6a7e0644..bc89b0f1e1587 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -557,7 +557,7 @@ private static Function fromBool(Function conve } private static Function fromDateTime(Function converter) { - return l -> converter.apply(((ZonedDateTime) l).toEpochSecond()); + return l -> converter.apply(((ZonedDateTime) l).toInstant().toEpochMilli()); } private static 
Function toDateTime(Conversion conversion) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index c42159bfaa35d..546d276e4ceb5 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -99,14 +99,14 @@ public void testConversionToLong() { { Conversion conversion = conversionFor(DATE, to); assertNull(conversion.convert(null)); - assertEquals(123379200L, conversion.convert(DateUtils.asDateOnly(123456789101L))); - assertEquals(-123465600L, conversion.convert(DateUtils.asDateOnly(-123456789101L))); + assertEquals(123379200000L, conversion.convert(DateUtils.asDateOnly(123456789101L))); + assertEquals(-123465600000L, conversion.convert(DateUtils.asDateOnly(-123456789101L))); } { Conversion conversion = conversionFor(DATETIME, to); assertNull(conversion.convert(null)); - assertEquals(123456789L, conversion.convert(asDateTime(123456789101L))); - assertEquals(-123456790L, conversion.convert(asDateTime(-123456789101L))); + assertEquals(123456789101L, conversion.convert(asDateTime(123456789101L))); + assertEquals(-123456789101L, conversion.convert(asDateTime(-123456789101L))); } { Conversion conversion = conversionFor(KEYWORD, to); @@ -238,14 +238,14 @@ public void testConversionToDouble() { { Conversion conversion = conversionFor(DATE, to); assertNull(conversion.convert(null)); - assertEquals(1.233792E8, (double) conversion.convert(DateUtils.asDateOnly(123456789101L)), 0); - assertEquals(-1.234656E8, (double) conversion.convert(DateUtils.asDateOnly(-123456789101L)), 0); + assertEquals(1.233792E11, (double) conversion.convert(DateUtils.asDateOnly(123456789101L)), 0); + assertEquals(-1.234656E11, (double) conversion.convert(DateUtils.asDateOnly(-123456789101L)), 0); } { Conversion conversion = conversionFor(DATETIME, to); assertNull(conversion.convert(null)); - assertEquals(1.23456789E8, (double) conversion.convert(asDateTime(123456789101L)), 0); - assertEquals(-1.2345679E8, (double) conversion.convert(asDateTime(-123456789101L)), 0); + assertEquals(1.23456789101E11, (double) conversion.convert(asDateTime(123456789101L)), 0); + assertEquals(-1.23456789101E11, (double) conversion.convert(asDateTime(-123456789101L)), 0); } { Conversion conversion = conversionFor(KEYWORD, to); @@ -340,20 +340,28 @@ public void testConversionToInt() { { Conversion conversion = conversionFor(DATE, to); assertNull(conversion.convert(null)); - assertEquals(123379200, conversion.convert(DateUtils.asDateOnly(123456789101L))); - assertEquals(-123465600, conversion.convert(DateUtils.asDateOnly(-123456789101L))); + assertEquals(0, conversion.convert(DateUtils.asDateOnly(12345678L))); + assertEquals(86400000, conversion.convert(DateUtils.asDateOnly(123456789L))); + assertEquals(172800000, conversion.convert(DateUtils.asDateOnly(223456789L))); + assertEquals(-172800000, conversion.convert(DateUtils.asDateOnly(-123456789L))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(DateUtils.asDateOnly(Long.MAX_VALUE))); + assertEquals("[9223372036828800000] out of [integer] range", e.getMessage()); } { Conversion conversion = conversionFor(DATETIME, to); assertNull(conversion.convert(null)); - assertEquals(123456789, conversion.convert(asDateTime(123456789101L))); - assertEquals(-123456790, 
conversion.convert(asDateTime(-123456789101L))); + assertEquals(12345678, conversion.convert(DateUtils.asDateTime(12345678L))); + assertEquals(223456789, conversion.convert(DateUtils.asDateTime(223456789L))); + assertEquals(-123456789, conversion.convert(DateUtils.asDateTime(-123456789L))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(DateUtils.asDateTime(Long.MAX_VALUE))); + assertEquals("[" + Long.MAX_VALUE + "] out of [integer] range", e.getMessage()); } } public void testConversionToShort() { + DataType to = SHORT; { - Conversion conversion = conversionFor(DOUBLE, SHORT); + Conversion conversion = conversionFor(DOUBLE, to); assertNull(conversion.convert(null)); assertEquals((short) 10, conversion.convert(10.0)); assertEquals((short) 10, conversion.convert(10.1)); @@ -361,11 +369,28 @@ public void testConversionToShort() { Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Integer.MAX_VALUE)); assertEquals("[" + Integer.MAX_VALUE + "] out of [short] range", e.getMessage()); } + { + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals((short) 0, conversion.convert(DateUtils.asDateOnly(12345678L))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(DateUtils.asDateOnly(123456789L))); + assertEquals("[86400000] out of [short] range", e.getMessage()); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals((short) 12345, conversion.convert(DateUtils.asDateTime(12345L))); + assertEquals((short) -12345, conversion.convert(DateUtils.asDateTime(-12345L))); + Exception e = expectThrows(SqlIllegalArgumentException.class, + () -> conversion.convert(DateUtils.asDateTime(Integer.MAX_VALUE))); + assertEquals("[" + Integer.MAX_VALUE + "] out of [short] range", e.getMessage()); + } } public void testConversionToByte() { + DataType to = BYTE; { - Conversion conversion = conversionFor(DOUBLE, BYTE); + Conversion conversion = conversionFor(DOUBLE, to); assertNull(conversion.convert(null)); assertEquals((byte) 10, conversion.convert(10.0)); assertEquals((byte) 10, conversion.convert(10.1)); @@ -373,6 +398,22 @@ public void testConversionToByte() { Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Short.MAX_VALUE)); assertEquals("[" + Short.MAX_VALUE + "] out of [byte] range", e.getMessage()); } + { + Conversion conversion = conversionFor(DATE, to); + assertNull(conversion.convert(null)); + assertEquals((byte) 0, conversion.convert(DateUtils.asDateOnly(12345678L))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(DateUtils.asDateOnly(123456789L))); + assertEquals("[86400000] out of [byte] range", e.getMessage()); + } + { + Conversion conversion = conversionFor(DATETIME, to); + assertNull(conversion.convert(null)); + assertEquals((byte) 123, conversion.convert(DateUtils.asDateTime(123L))); + assertEquals((byte) -123, conversion.convert(DateUtils.asDateTime(-123L))); + Exception e = expectThrows(SqlIllegalArgumentException.class, + () -> conversion.convert(DateUtils.asDateTime(Integer.MAX_VALUE))); + assertEquals("[" + Integer.MAX_VALUE + "] out of [byte] range", e.getMessage()); + } } public void testConversionToNull() { From a4020f458779386c91b5c4bb49bc5496ca841d66 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 13:40:02 -0800 Subject: [PATCH 62/64] Mute 
SharedClusterSnapshotRestoreIT#testSnapshotCanceledOnRemovedShard Tracked in #37888. --- .../elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index e9ce98b564e1d..a4d4c31517a7e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3179,6 +3179,7 @@ public void testGetSnapshotsRequest() throws Exception { * * See https://github.com/elastic/elasticsearch/issues/20876 */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37888") public void testSnapshotCanceledOnRemovedShard() throws Exception { final int numPrimaries = 1; final int numReplicas = 1; From 980c13d85cf43097c8b46da4d1d3450ded71ae42 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 25 Jan 2019 13:49:16 -0800 Subject: [PATCH 63/64] Pass distribution type through to docs tests (#37885) This commit fixes the distribution flavor passed to the docs tests to be the same as the distribution. These two values are now in sync (either oss or default) for the docs tests. --- .../groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 15cef3f472817..ce76ad5d28fc6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -45,8 +45,7 @@ public class DocsTestPlugin extends RestTestPlugin { '\\{version\\}': Version.fromString(VersionProperties.elasticsearch).toString(), '\\{version_qualified\\}': VersionProperties.elasticsearch, '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), - '\\{build_flavor\\}' : - project.integTestCluster.distribution.startsWith('oss-') ? 
'oss' : 'default', + '\\{build_flavor\\}' : project.integTestCluster.distribution, ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' From 899dfc38bccce8da9c4e713702659ab813c8c321 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Jan 2019 22:55:29 +0100 Subject: [PATCH 64/64] Fix S3 Repository ITs When Docker is not Available (#37878) * Disable Minio fixture and tests that require it when fixtures are disabled or Docker is not available * Relates #37852 --- plugins/repository-s3/build.gradle | 48 ++++++++++++++++-------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 49c60d2edd730..779274cfd5e17 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -170,28 +170,33 @@ if (useFixture) { preProcessFixture.dependsOn(writeDockerFile) // The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: project.afterEvaluate { - ClusterConfiguration cluster = project.extensions.getByName('integTestMinioCluster') as ClusterConfiguration - cluster.dependsOn(project.bundlePlugin) - cluster.dependsOn(postProcessFixture) - cluster.keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - cluster.keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - - Closure minioAddressAndPort = { - int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" - assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort + // Only configure the Minio tests if postProcessFixture is configured to skip them if Docker is not available + // or fixtures have been disabled + if (postProcessFixture.enabled) { + ClusterConfiguration cluster = project.extensions.getByName('integTestMinioCluster') as ClusterConfiguration + cluster.dependsOn(project.bundlePlugin) + cluster.dependsOn(postProcessFixture) + cluster.keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + cluster.keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey + + Closure minioAddressAndPort = { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort + } + cluster.setting 's3.client.integration_test_permanent.endpoint', "${-> minioAddressAndPort.call()}" + + Task restIntegTestTask = project.tasks.getByName('integTestMinio') + restIntegTestTask.clusterConfig.plugin(project.path) + + // Default jvm arguments for all test clusters + String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + + " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + + " " + System.getProperty('tests.jvm.argline', '') + + restIntegTestTask.clusterConfig.jvmArgs = jvmArgs + project.check.dependsOn(integTestMinio) } - cluster.setting 's3.client.integration_test_permanent.endpoint', "${ -> minioAddressAndPort.call()}" - - Task restIntegTestTask = project.tasks.getByName('integTestMinio') - restIntegTestTask.clusterConfig.plugin(project.path) - - // Default jvm arguments for all test clusters - String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + - " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + - " " + System.getProperty('tests.jvm.argline', '') - - restIntegTestTask.clusterConfig.jvmArgs = jvmArgs } 
integTestMinioRunner.dependsOn(postProcessFixture) @@ -202,7 +207,6 @@ if (useFixture) { 'repository_s3/50_repository_ecs_credentials/*' ].join(",") - project.check.dependsOn(integTestMinio) BuildPlugin.requireDocker(integTestMinio) }
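The ResultsIndexUpgradeService removed earlier in this series drives its upgrade steps through ActionListeners declared in reverse order (step <7> first, step <1> last), each step's success handler kicking off the next step. A minimal sketch of that callback-chaining style follows; ChainSketch, stepOne and stepTwo are hypothetical names, while ActionListener.wrap(onResponse, onFailure) is the real Elasticsearch helper used throughout the deleted code.

import org.elasticsearch.action.ActionListener;

class ChainSketch {

    void run(ActionListener<Boolean> done) {
        // Declared before step one, because step one's success handler refers to it.
        ActionListener<Boolean> stepTwoListener = ActionListener.wrap(
            ignored -> done.onResponse(true),    // whole chain complete
            done::onFailure);                    // propagate the failure unchanged

        stepOne(ActionListener.wrap(
            ignored -> stepTwo(stepTwoListener), // on success, start the next step
            done::onFailure));
    }

    // Hypothetical async steps; the real service issued reindex, alias and settings calls.
    void stepOne(ActionListener<Boolean> listener) { listener.onResponse(true); }
    void stepTwo(ActionListener<Boolean> listener) { listener.onResponse(true); }
}

Reading the deleted upgrade method bottom-up, from <1> to <7>, therefore gives the actual execution order: create the new write indices, move the write aliases, set the read-only block, reindex, move the read aliases, delete the old indices, and respond.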
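The RecoveryState change above (#37380) is a mechanical Streamable-to-Writeable migration: the mutating readFrom(StreamInput) method is replaced by a constructor that takes a StreamInput, so every field is assigned exactly once and readFrom can be left to throw UnsupportedOperationException. A minimal before/after sketch of the pattern, assuming Elasticsearch's stream interfaces on the classpath; the Stat classes and their count field are hypothetical:

import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;

// Before: Streamable needs a blank instance that is mutated after construction.
class StatStreamable implements Streamable {
    long count; // cannot be final, readFrom assigns it later

    @Override
    public void readFrom(StreamInput in) throws IOException {
        count = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(count);
    }
}

// After: Writeable deserializes in a constructor, so the field can be final and
// no partially initialized instance is ever visible.
class StatWriteable implements Writeable {
    private final long count;

    StatWriteable(StreamInput in) throws IOException {
        count = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(count);
    }
}

This is also why the test helper in RecoveryTargetTests changes from createObj() to createObj(StreamInput in): with Writeable there is no empty instance on which to call readFrom.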
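The SQL fix above (#37869) changes only the scale of the converted value: casting a DATE or DATETIME to a numeric type now produces epoch milliseconds rather than epoch seconds, which is why the expected values in datetime.csv-spec grew by a factor of 1000. A small self-contained sketch of the difference, using java.time directly; the class name and the 2016-06-01 timestamp are illustrative only:

import java.time.ZonedDateTime;

public class EpochScaleSketch {
    public static void main(String[] args) {
        ZonedDateTime dt = ZonedDateTime.parse("2016-06-01T00:00:00Z");
        // Old behavior: seconds since the epoch.
        System.out.println(dt.toEpochSecond());            // 1464739200
        // Fixed behavior: milliseconds since the epoch, the unit Elasticsearch
        // uses to store date field values.
        System.out.println(dt.toInstant().toEpochMilli()); // 1464739200000
    }
}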