From 94b46d8b0b1361233b062121fda4cbc8e3521d5b Mon Sep 17 00:00:00 2001 From: Vigya Sharma Date: Wed, 10 Apr 2019 11:52:09 +0530 Subject: [PATCH 001/321] Reset max_retries counter before executing routing commands --- .../routing/allocation/AllocationService.java | 15 ++-- .../RetryFailedAllocationTests.java | 82 +++++++++++++++++++ 2 files changed, 91 insertions(+), 6 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index c688a120a8b6a..b7297a8a544db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -296,7 +296,7 @@ private void removeDelayMarkers(RoutingAllocation allocation) { /** * Reset failed allocation counter for unassigned shards */ - private void resetFailedAllocationCounter(RoutingAllocation allocation) { + private RoutingAllocation resetFailedAllocationCounter(ClusterState oldState, RoutingAllocation allocation) { final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator(); while (unassignedIterator.hasNext()) { ShardRouting shardRouting = unassignedIterator.next(); @@ -307,6 +307,9 @@ private void resetFailedAllocationCounter(RoutingAllocation allocation) { unassignedInfo.getUnassignedTimeInMillis(), unassignedInfo.isDelayed(), unassignedInfo.getLastAllocationStatus()), shardRouting.recoverySource(), allocation.changes()); } + ClusterState newState = buildResult(oldState, allocation); + return new RoutingAllocation(allocationDeciders, getMutableRoutingNodes(newState), newState, + clusterInfoService.getClusterInfo(), allocation.getCurrentNanoTime()); } /** @@ -337,16 +340,16 @@ public CommandsResult reroute(final ClusterState clusterState, AllocationCommand allocation.debugDecision(true); // we ignore disable allocation, because commands are explicit allocation.ignoreDisable(true); + + if (retryFailed) { + allocation = resetFailedAllocationCounter(clusterState, allocation); + } + RoutingExplanations explanations = commands.execute(allocation, explain); // we revert the ignore disable flag, since when rerouting, we want the original setting to take place allocation.ignoreDisable(false); // the assumption is that commands will move / act on shards (or fail through exceptions) // so, there will always be shard "movements", so no need to check on reroute - - if (retryFailed) { - resetFailedAllocationCounter(allocation); - } - reroute(allocation); return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands")); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java new file mode 100644 index 0000000000000..73c2d03149b43 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java @@ -0,0 +1,82 @@ +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; 
+import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; +import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; +import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; +import org.elasticsearch.common.settings.Settings; + +import java.util.Collections; +import java.util.List; + +public class RetryFailedAllocationTests extends ESAllocationTestCase { + + private MockAllocationService strategy; + private ClusterState clusterState; + private final String INDEX_NAME = "index"; + + @Override + public void setUp() throws Exception { + super.setUp(); + MetaData metaData = MetaData.builder().put(IndexMetaData.builder(INDEX_NAME) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)).build(); + RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index(INDEX_NAME)).build(); + clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + strategy = createAllocationService(Settings.EMPTY); + } + + private ShardRouting getPrimary() { + for (ShardRouting shard: clusterState.getRoutingTable().allShards()) { + if (shard.getIndexName().equals(INDEX_NAME) && shard.primary()) { + return shard; + } + } + throw new IllegalArgumentException("No primary found for index: " + INDEX_NAME); + } + + private ShardRouting getReplica() { + for (ShardRouting shard: clusterState.getRoutingTable().allShards()) { + if (shard.getIndexName().equals(INDEX_NAME) && !shard.primary()) { + return shard; + } + } + throw new IllegalArgumentException("No replica found for index: " + INDEX_NAME); + } + + public void testRetryFailedResetForAllocationCommands() { + final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); + clusterState = strategy.reroute(clusterState, "initial allocation"); + clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList(getPrimary())); + + // Exhaust all replica allocation attempts with shard failures + for (int i = 0; i < retries; i++) { + List<FailedShard> failedShards = Collections.singletonList( + new FailedShard(getReplica(), "failing-shard::attempt-" + i, + new UnsupportedOperationException(), randomBoolean())); + clusterState = strategy.applyFailedShards(clusterState, failedShards); + clusterState = strategy.reroute(clusterState, "allocation retry attempt-" + i); + } + + // Now allocate replica with retry_failed flag set + AllocationService.CommandsResult result = strategy.reroute(clusterState, + new AllocationCommands(new AllocateReplicaAllocationCommand(INDEX_NAME, 0, + getPrimary().currentNodeId().equals("node1") ?
"node2" : "node1")), + false, true); + clusterState = result.getClusterState(); + + assertEquals(ShardRoutingState.INITIALIZING, getReplica().state()); + clusterState = strategy.applyStartedShards(clusterState, Collections.singletonList(getReplica())); + assertEquals(ShardRoutingState.STARTED, getReplica().state()); + assertFalse(clusterState.getRoutingNodes().hasUnassignedShards()); + } +} From c32b67e468738dc63bbd9ba36b6d554576cab30a Mon Sep 17 00:00:00 2001 From: Vigya Sharma Date: Thu, 11 Apr 2019 22:40:42 +0530 Subject: [PATCH 002/321] Add license header --- .../RetryFailedAllocationTests.java | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java index 73c2d03149b43..f2bc145f7a448 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RetryFailedAllocationTests.java @@ -1,3 +1,22 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.Version; From fdf1d4186e70c982ab11fa138d0f9d3df49b1085 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 7 May 2019 22:44:47 -0400 Subject: [PATCH 003/321] Enable trace logging in CCR retention lease tests These tests are failing somewhat mysteriously, indicating that when we renew retention leaess during a restore that our retention leases that we added before starting the restore suddenly do not exist. To make sense of this, this commit enables trace logging. 
--- .../java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 33c1428c7e19e..2cf6e3bdaf332 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -87,6 +88,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +@TestLogging(value = "org.elasticsearch.xpack.ccr:trace") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -615,7 +617,6 @@ public void testRetentionLeaseAdvancesWhileFollowing() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39509") public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; @@ -922,7 +923,6 @@ public void onResponseReceived( } } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/39850") public void testForgetFollower() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; From d94b1478c005022cb8f2d958e8e41dc032c2a6a1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 7 May 2019 22:50:11 -0700 Subject: [PATCH 004/321] Disable rhel8 in packaging tests (#41924) This commit disables rhel 8 from being tested in vagrant packaging tests. The vagrant image we use is a beta release, but RHEL 8 was just released, which has caused the package mirrors for the beta to stop working. --- .../org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 8beb279421981..8eb200cd19b45 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -30,7 +30,7 @@ class VagrantTestPlugin implements Plugin<Project> { 'oel-6', 'oel-7', 'opensuse-42', - 'rhel-8', + /* TODO: need a real RHEL license now that it is out of beta 'rhel-8',*/ 'sles-12', 'ubuntu-1604', 'ubuntu-1804' From 89d31de5355310fe273fc66e9c040d7a54f43b23 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 8 May 2019 09:26:37 +0200 Subject: [PATCH 005/321] Upgrade SDK and test discovery-ec2 credential providers (#41732) Upgrades the AWS SDK to the same version that we're using for the repository-s3 plugin, providing testing capabilities to override certain SDK endpoints in order to point them to localhost for testing. Adds tests for the various credential providers.
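The resolution order those tests exercise follows from the SDK's default credential chain, which the production change below switches to via DefaultAWSCredentialsProviderChain.getInstance(). A minimal sketch of the pattern, an editor's illustration modeled on buildCredentials in AwsEc2ServiceImpl below and using only AWS SDK 1.11.x classes already named in this patch (the wrapper class and method name are hypothetical):

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;

class Ec2CredentialsSketch {
    static AWSCredentialsProvider credentialsProvider(AWSCredentials configured) {
        if (configured == null) {
            // No explicit access/secret key configured: defer to the SDK default chain,
            // which consults environment variables, Java system properties, the shared
            // credentials file, and ECS container or EC2 instance profile credentials.
            return DefaultAWSCredentialsProviderChain.getInstance();
        }
        // Explicit credentials (e.g. from the Elasticsearch keystore) take precedence.
        return new AWSStaticCredentialsProvider(configured);
    }
}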
--- plugins/discovery-ec2/build.gradle | 2 +- .../aws-java-sdk-core-1.11.187.jar.sha1 | 1 - .../aws-java-sdk-core-1.11.505.jar.sha1 | 1 + .../aws-java-sdk-ec2-1.11.187.jar.sha1 | 1 - .../aws-java-sdk-ec2-1.11.505.jar.sha1 | 1 + .../discovery-ec2/qa/amazon-ec2/build.gradle | 84 +- .../discovery/ec2/AmazonEC2Fixture.java | 49 +- .../discovery/ec2/AwsEc2ServiceImpl.java | 9 +- .../discovery/ec2/AmazonEC2Mock.java | 1329 +++++++++++++---- 9 files changed, 1134 insertions(+), 343 deletions(-) delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 16750a9788ae9..50dc6ac5d85b5 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'aws': '1.11.187' + 'aws': '1.11.505' ] dependencies { diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 deleted file mode 100644 index a5293a9bf6580..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6f47fcd3c2917bef69dc36aba203c5ea4af9bf24 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 new file mode 100644 index 0000000000000..add5db290e8a8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 @@ -0,0 +1 @@ +d19328c227b2b5ad81d137361ebc9cbcd0396465 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 deleted file mode 100644 index 4602436e08182..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3e5a8601f3105624674b1a12ca34f453a4b5895 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 new file mode 100644 index 0000000000000..857f0888de3aa --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 @@ -0,0 +1 @@ +b669b3c90ea9bf73734ab26f0cb30c5c66addf55 \ No newline at end of file diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 209ab3278398d..8397549f384fe 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture +import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -30,14 +31,6 @@ dependencies { final int ec2NumberOfNodes = 3 -/** A task to start the AmazonEC2Fixture which emulates an EC2 service **/ -task ec2Fixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 
'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/integTest-1/config/unicast_hosts.txt" -} - Map expansions = [ 'expected_nodes': ec2NumberOfNodes ] @@ -47,20 +40,71 @@ processTestResources { MavenFilteringHack.filter(it, expansions) } -integTest { - dependsOn ec2Fixture, project(':plugins:discovery-ec2').bundlePlugin -} +// disable default test task, use specialized ones below +integTest.enabled = false + +/* + * Test using various credential providers (see also https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html): + * - Elasticsearch Keystore (secure settings discovery.ec2.access_key and discovery.ec2.secret_key) + * - Java system properties (aws.accessKeyId and aws.secretAccessKey) + * - Environment variables (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) + * - ECS container credentials (loaded from ECS if the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set) + * - Instance profile credentials (delivered through the EC2 metadata service) + * + * Notably missing is a test for the default credential profiles file, which is located at ~/.aws/credentials and would at least require a + * custom Java security policy to work. + */ +['KeyStore', 'EnvVariables', 'SystemProperties', 'ContainerCredentials', 'InstanceProfile'].forEach { action -> + AntFixture fixture = tasks.create(name: "ec2Fixture${action}", type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/integTest${action}-1/config/unicast_hosts.txt" + } + + tasks.create(name: "integTest${action}", type: RestIntegTestTask) { + dependsOn fixture, project(':plugins:discovery-ec2').bundlePlugin + } + + check.dependsOn("integTest${action}") + + testClusters."integTest${action}" { + numberOfNodes = ec2NumberOfNodes + plugin file(project(':plugins:discovery-ec2').bundlePlugin.archiveFile) -testClusters.integTest { - numberOfNodes = ec2NumberOfNodes - plugin file(project(':plugins:discovery-ec2').bundlePlugin.archiveFile) + setting 'discovery.seed_providers', 'ec2' + setting 'network.host', '_ec2_' + setting 'discovery.ec2.endpoint', { "http://${-> fixture.addressAndPort}" } + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${-> fixture.addressAndPort}" } + } +} + +// Extra config for KeyStore +testClusters.integTestKeyStore { keystore 'discovery.ec2.access_key', 'ec2_integration_test_access_key' keystore 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' - - setting 'discovery.seed_providers', 'ec2' - setting 'network.host', '_ec2_' - setting 'discovery.ec2.endpoint', { "http://${ec2Fixture.addressAndPort}" } - - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${ec2Fixture.addressAndPort}" } } + +// Extra config for EnvVariables +testClusters.integTestEnvVariables { + environment 'AWS_ACCESS_KEY_ID', 'ec2_integration_test_access_key' + environment 'AWS_SECRET_ACCESS_KEY', 'ec2_integration_test_secret_key' +} + +// Extra config for SystemProperties +testClusters.integTestSystemProperties { + systemProperty 'aws.accessKeyId', 'ec2_integration_test_access_key' + systemProperty 'aws.secretKey', 'ec2_integration_test_secret_key' +} + +// Extra config for ContainerCredentials +ec2FixtureContainerCredentials.env 'ACTIVATE_CONTAINER_CREDENTIALS', true +
+testClusters.integTestContainerCredentials { + environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', + { "http://${-> tasks.findByName("ec2FixtureContainerCredentials").addressAndPort}/ecs_credentials_endpoint" } +} + +// Extra config for InstanceProfile +ec2FixtureInstanceProfile.env 'ACTIVATE_INSTANCE_PROFILE', true diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java index 6027bd861590e..32abcdc43e645 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -18,10 +18,12 @@ */ package org.elasticsearch.discovery.ec2; +import com.amazonaws.util.DateUtils; import org.apache.http.NameValuePair; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URLEncodedUtils; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.fixture.AbstractHttpFixture; @@ -34,8 +36,12 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; @@ -45,10 +51,14 @@ public class AmazonEC2Fixture extends AbstractHttpFixture { private final Path nodes; + private final boolean instanceProfile; + private final boolean containerCredentials; - private AmazonEC2Fixture(final String workingDir, final String nodesUriPath) { + private AmazonEC2Fixture(final String workingDir, final String nodesUriPath, boolean instanceProfile, boolean containerCredentials) { super(workingDir); this.nodes = toPath(Objects.requireNonNull(nodesUriPath)); + this.instanceProfile = instanceProfile; + this.containerCredentials = containerCredentials; } public static void main(String[] args) throws Exception { @@ -56,7 +66,10 @@ public static void main(String[] args) throws Exception { throw new IllegalArgumentException("AmazonEC2Fixture "); } - final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1]); + boolean instanceProfile = Booleans.parseBoolean(System.getenv("ACTIVATE_INSTANCE_PROFILE"), false); + boolean containerCredentials = Booleans.parseBoolean(System.getenv("ACTIVATE_CONTAINER_CREDENTIALS"), false); + + final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1], instanceProfile, containerCredentials); fixture.listen(); } @@ -65,6 +78,12 @@ protected Response handle(final Request request) throws IOException { if ("/".equals(request.getPath()) && (HttpPost.METHOD_NAME.equals(request.getMethod()))) { final String userAgent = request.getHeader("User-Agent"); if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { + + final String auth = request.getHeader("Authorization"); + if (auth == null || auth.contains("ec2_integration_test_access_key") == false) { + throw new IllegalArgumentException("wrong access key: " + auth); + } + // Simulate an EC2 DescribeInstancesResponse byte[] responseBody = EMPTY_BYTE; for (NameValuePair parse : URLEncodedUtils.parse(new String(request.getBody(), UTF_8), UTF_8)) { @@ -79,6 +98,32 @@ protected Response handle(final 
Request request) throws IOException { if ("/latest/meta-data/local-ipv4".equals(request.getPath()) && (HttpGet.METHOD_NAME.equals(request.getMethod()))) { return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, "127.0.0.1".getBytes(UTF_8)); } + + if (instanceProfile && + "/latest/meta-data/iam/security-credentials/".equals(request.getPath()) && + HttpGet.METHOD_NAME.equals(request.getMethod())) { + final Map<String, String> headers = new HashMap<>(contentType("text/plain")); + return new Response(RestStatus.OK.getStatus(), headers, "my_iam_profile".getBytes(UTF_8)); + } + + if ((containerCredentials && + "/ecs_credentials_endpoint".equals(request.getPath()) && + HttpGet.METHOD_NAME.equals(request.getMethod())) || + ("/latest/meta-data/iam/security-credentials/my_iam_profile".equals(request.getPath()) && + HttpGet.METHOD_NAME.equals(request.getMethod()))) { + final Date expiration = new Date(new Date().getTime() + TimeUnit.DAYS.toMillis(1)); + final String response = "{" + + "\"AccessKeyId\": \"" + "ec2_integration_test_access_key" + "\"," + + "\"Expiration\": \"" + DateUtils.formatISO8601Date(expiration) + "\"," + + "\"RoleArn\": \"" + "test" + "\"," + + "\"SecretAccessKey\": \"" + "test" + "\"," + + "\"Token\": \"" + "test" + "\"" + + "}"; + + final Map<String, String> headers = new HashMap<>(contentType("application/json")); + return new Response(RestStatus.OK.getStatus(), headers, response.getBytes(UTF_8)); + } + return null; } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index 5f384c049124e..739b964925c3e 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -22,13 +22,12 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.amazonaws.http.IdleConnectionReaper; -import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -97,11 +96,11 @@ static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings c static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) { final AWSCredentials credentials = clientSettings.credentials; if (credentials == null) { - logger.debug("Using either environment variables, system properties or instance profile credentials"); - return new DefaultAWSCredentialsProviderChain(); + logger.debug("Using default provider chain"); + return DefaultAWSCredentialsProviderChain.getInstance(); } else { logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(credentials); + return new AWSStaticCredentialsProvider(credentials); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index f1c373ee33a5a..3135769df5f46 100644 ---
a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -20,37 +20,48 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonServiceException; import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.ClientConfiguration; import com.amazonaws.ResponseMetadata; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.regions.Region; import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; +import com.amazonaws.services.ec2.model.AcceptTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.AcceptTransitGatewayVpcAttachmentResult; +import com.amazonaws.services.ec2.model.AcceptVpcEndpointConnectionsRequest; +import com.amazonaws.services.ec2.model.AcceptVpcEndpointConnectionsResult; import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionResult; +import com.amazonaws.services.ec2.model.AdvertiseByoipCidrRequest; +import com.amazonaws.services.ec2.model.AdvertiseByoipCidrResult; import com.amazonaws.services.ec2.model.AllocateAddressRequest; import com.amazonaws.services.ec2.model.AllocateAddressResult; import com.amazonaws.services.ec2.model.AllocateHostsRequest; import com.amazonaws.services.ec2.model.AllocateHostsResult; -import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; -import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; +import com.amazonaws.services.ec2.model.ApplySecurityGroupsToClientVpnTargetNetworkRequest; +import com.amazonaws.services.ec2.model.ApplySecurityGroupsToClientVpnTargetNetworkResult; import com.amazonaws.services.ec2.model.AssignIpv6AddressesRequest; import com.amazonaws.services.ec2.model.AssignIpv6AddressesResult; +import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; +import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; import com.amazonaws.services.ec2.model.AssociateAddressRequest; import com.amazonaws.services.ec2.model.AssociateAddressResult; -import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; -import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; -import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; -import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; -import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; -import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; -import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; -import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; +import com.amazonaws.services.ec2.model.AssociateClientVpnTargetNetworkRequest; +import com.amazonaws.services.ec2.model.AssociateClientVpnTargetNetworkResult; import com.amazonaws.services.ec2.model.AssociateDhcpOptionsRequest; import com.amazonaws.services.ec2.model.AssociateDhcpOptionsResult; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; import com.amazonaws.services.ec2.model.AssociateRouteTableRequest; import 
com.amazonaws.services.ec2.model.AssociateRouteTableResult; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; +import com.amazonaws.services.ec2.model.AssociateTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.AssociateTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcResult; import com.amazonaws.services.ec2.model.AttachInternetGatewayRequest; @@ -61,6 +72,8 @@ import com.amazonaws.services.ec2.model.AttachVolumeResult; import com.amazonaws.services.ec2.model.AttachVpnGatewayRequest; import com.amazonaws.services.ec2.model.AttachVpnGatewayResult; +import com.amazonaws.services.ec2.model.AuthorizeClientVpnIngressRequest; +import com.amazonaws.services.ec2.model.AuthorizeClientVpnIngressResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressRequest; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest; @@ -69,6 +82,8 @@ import com.amazonaws.services.ec2.model.BundleInstanceResult; import com.amazonaws.services.ec2.model.CancelBundleTaskRequest; import com.amazonaws.services.ec2.model.CancelBundleTaskResult; +import com.amazonaws.services.ec2.model.CancelCapacityReservationRequest; +import com.amazonaws.services.ec2.model.CancelCapacityReservationResult; import com.amazonaws.services.ec2.model.CancelConversionTaskRequest; import com.amazonaws.services.ec2.model.CancelConversionTaskResult; import com.amazonaws.services.ec2.model.CancelExportTaskRequest; @@ -83,24 +98,34 @@ import com.amazonaws.services.ec2.model.CancelSpotInstanceRequestsResult; import com.amazonaws.services.ec2.model.ConfirmProductInstanceRequest; import com.amazonaws.services.ec2.model.ConfirmProductInstanceResult; +import com.amazonaws.services.ec2.model.CopyFpgaImageRequest; +import com.amazonaws.services.ec2.model.CopyFpgaImageResult; import com.amazonaws.services.ec2.model.CopyImageRequest; import com.amazonaws.services.ec2.model.CopyImageResult; import com.amazonaws.services.ec2.model.CopySnapshotRequest; import com.amazonaws.services.ec2.model.CopySnapshotResult; +import com.amazonaws.services.ec2.model.CreateCapacityReservationRequest; +import com.amazonaws.services.ec2.model.CreateCapacityReservationResult; +import com.amazonaws.services.ec2.model.CreateClientVpnEndpointRequest; +import com.amazonaws.services.ec2.model.CreateClientVpnEndpointResult; +import com.amazonaws.services.ec2.model.CreateClientVpnRouteRequest; +import com.amazonaws.services.ec2.model.CreateClientVpnRouteResult; import com.amazonaws.services.ec2.model.CreateCustomerGatewayRequest; -import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; -import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; import com.amazonaws.services.ec2.model.CreateCustomerGatewayResult; +import com.amazonaws.services.ec2.model.CreateDefaultSubnetRequest; +import com.amazonaws.services.ec2.model.CreateDefaultSubnetResult; +import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; +import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; import com.amazonaws.services.ec2.model.CreateDhcpOptionsRequest; import 
com.amazonaws.services.ec2.model.CreateDhcpOptionsResult; import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayRequest; import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayResult; -import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; -import com.amazonaws.services.ec2.model.CreateFpgaImageResult; -import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; -import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; +import com.amazonaws.services.ec2.model.CreateFleetRequest; +import com.amazonaws.services.ec2.model.CreateFleetResult; import com.amazonaws.services.ec2.model.CreateFlowLogsRequest; import com.amazonaws.services.ec2.model.CreateFlowLogsResult; +import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; +import com.amazonaws.services.ec2.model.CreateFpgaImageResult; import com.amazonaws.services.ec2.model.CreateImageRequest; import com.amazonaws.services.ec2.model.CreateImageResult; import com.amazonaws.services.ec2.model.CreateInstanceExportTaskRequest; @@ -109,12 +134,18 @@ import com.amazonaws.services.ec2.model.CreateInternetGatewayResult; import com.amazonaws.services.ec2.model.CreateKeyPairRequest; import com.amazonaws.services.ec2.model.CreateKeyPairResult; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionResult; import com.amazonaws.services.ec2.model.CreateNatGatewayRequest; import com.amazonaws.services.ec2.model.CreateNatGatewayResult; import com.amazonaws.services.ec2.model.CreateNetworkAclEntryRequest; import com.amazonaws.services.ec2.model.CreateNetworkAclEntryResult; import com.amazonaws.services.ec2.model.CreateNetworkAclRequest; import com.amazonaws.services.ec2.model.CreateNetworkAclResult; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceResult; import com.amazonaws.services.ec2.model.CreatePlacementGroupRequest; @@ -135,10 +166,22 @@ import com.amazonaws.services.ec2.model.CreateSubnetResult; import com.amazonaws.services.ec2.model.CreateTagsRequest; import com.amazonaws.services.ec2.model.CreateTagsResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayVpcAttachmentResult; import com.amazonaws.services.ec2.model.CreateVolumeRequest; import com.amazonaws.services.ec2.model.CreateVolumeResult; +import com.amazonaws.services.ec2.model.CreateVpcEndpointConnectionNotificationRequest; +import com.amazonaws.services.ec2.model.CreateVpcEndpointConnectionNotificationResult; import com.amazonaws.services.ec2.model.CreateVpcEndpointRequest; import 
com.amazonaws.services.ec2.model.CreateVpcEndpointResult; +import com.amazonaws.services.ec2.model.CreateVpcEndpointServiceConfigurationRequest; +import com.amazonaws.services.ec2.model.CreateVpcEndpointServiceConfigurationResult; import com.amazonaws.services.ec2.model.CreateVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.CreateVpcPeeringConnectionResult; import com.amazonaws.services.ec2.model.CreateVpcRequest; @@ -149,26 +192,38 @@ import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.CreateVpnGatewayRequest; import com.amazonaws.services.ec2.model.CreateVpnGatewayResult; +import com.amazonaws.services.ec2.model.DeleteClientVpnEndpointRequest; +import com.amazonaws.services.ec2.model.DeleteClientVpnEndpointResult; +import com.amazonaws.services.ec2.model.DeleteClientVpnRouteRequest; +import com.amazonaws.services.ec2.model.DeleteClientVpnRouteResult; import com.amazonaws.services.ec2.model.DeleteCustomerGatewayRequest; import com.amazonaws.services.ec2.model.DeleteCustomerGatewayResult; import com.amazonaws.services.ec2.model.DeleteDhcpOptionsRequest; import com.amazonaws.services.ec2.model.DeleteDhcpOptionsResult; import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayRequest; import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayResult; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; +import com.amazonaws.services.ec2.model.DeleteFleetsRequest; +import com.amazonaws.services.ec2.model.DeleteFleetsResult; import com.amazonaws.services.ec2.model.DeleteFlowLogsRequest; import com.amazonaws.services.ec2.model.DeleteFlowLogsResult; +import com.amazonaws.services.ec2.model.DeleteFpgaImageRequest; +import com.amazonaws.services.ec2.model.DeleteFpgaImageResult; import com.amazonaws.services.ec2.model.DeleteInternetGatewayRequest; import com.amazonaws.services.ec2.model.DeleteInternetGatewayResult; import com.amazonaws.services.ec2.model.DeleteKeyPairRequest; import com.amazonaws.services.ec2.model.DeleteKeyPairResult; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateResult; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsRequest; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResult; import com.amazonaws.services.ec2.model.DeleteNatGatewayRequest; import com.amazonaws.services.ec2.model.DeleteNatGatewayResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryRequest; import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclRequest; import com.amazonaws.services.ec2.model.DeleteNetworkAclResult; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceResult; import com.amazonaws.services.ec2.model.DeletePlacementGroupRequest; @@ -187,8 +242,20 @@ import com.amazonaws.services.ec2.model.DeleteSubnetResult; import com.amazonaws.services.ec2.model.DeleteTagsRequest; import com.amazonaws.services.ec2.model.DeleteTagsResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRequest; +import 
com.amazonaws.services.ec2.model.DeleteTransitGatewayResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayVpcAttachmentResult; import com.amazonaws.services.ec2.model.DeleteVolumeRequest; import com.amazonaws.services.ec2.model.DeleteVolumeResult; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointConnectionNotificationsRequest; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointConnectionNotificationsResult; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointServiceConfigurationsRequest; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointServiceConfigurationsResult; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsRequest; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsResult; import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionRequest; @@ -201,18 +268,36 @@ import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.DeleteVpnGatewayRequest; import com.amazonaws.services.ec2.model.DeleteVpnGatewayResult; +import com.amazonaws.services.ec2.model.DeprovisionByoipCidrRequest; +import com.amazonaws.services.ec2.model.DeprovisionByoipCidrResult; import com.amazonaws.services.ec2.model.DeregisterImageRequest; import com.amazonaws.services.ec2.model.DeregisterImageResult; import com.amazonaws.services.ec2.model.DescribeAccountAttributesRequest; import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult; import com.amazonaws.services.ec2.model.DescribeAddressesRequest; import com.amazonaws.services.ec2.model.DescribeAddressesResult; +import com.amazonaws.services.ec2.model.DescribeAggregateIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribeAggregateIdFormatResult; import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesRequest; import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesResult; import com.amazonaws.services.ec2.model.DescribeBundleTasksRequest; import com.amazonaws.services.ec2.model.DescribeBundleTasksResult; +import com.amazonaws.services.ec2.model.DescribeByoipCidrsRequest; +import com.amazonaws.services.ec2.model.DescribeByoipCidrsResult; +import com.amazonaws.services.ec2.model.DescribeCapacityReservationsRequest; +import com.amazonaws.services.ec2.model.DescribeCapacityReservationsResult; import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesRequest; import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnAuthorizationRulesRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnAuthorizationRulesResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnConnectionsRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnConnectionsResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnEndpointsRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnEndpointsResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnRoutesRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnRoutesResult; +import 
com.amazonaws.services.ec2.model.DescribeClientVpnTargetNetworksRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnTargetNetworksResult; import com.amazonaws.services.ec2.model.DescribeConversionTasksRequest; import com.amazonaws.services.ec2.model.DescribeConversionTasksResult; import com.amazonaws.services.ec2.model.DescribeCustomerGatewaysRequest; @@ -221,26 +306,34 @@ import com.amazonaws.services.ec2.model.DescribeDhcpOptionsResult; import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysRequest; import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; -import com.amazonaws.services.ec2.model.DescribeExportTasksResult; import com.amazonaws.services.ec2.model.DescribeElasticGpusRequest; import com.amazonaws.services.ec2.model.DescribeElasticGpusResult; +import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; +import com.amazonaws.services.ec2.model.DescribeExportTasksResult; +import com.amazonaws.services.ec2.model.DescribeFleetHistoryRequest; +import com.amazonaws.services.ec2.model.DescribeFleetHistoryResult; +import com.amazonaws.services.ec2.model.DescribeFleetInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeFleetInstancesResult; +import com.amazonaws.services.ec2.model.DescribeFleetsRequest; +import com.amazonaws.services.ec2.model.DescribeFleetsResult; +import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; +import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; +import com.amazonaws.services.ec2.model.DescribeFpgaImageAttributeRequest; +import com.amazonaws.services.ec2.model.DescribeFpgaImageAttributeResult; import com.amazonaws.services.ec2.model.DescribeFpgaImagesRequest; import com.amazonaws.services.ec2.model.DescribeFpgaImagesResult; import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsRequest; import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsResult; import com.amazonaws.services.ec2.model.DescribeHostReservationsRequest; import com.amazonaws.services.ec2.model.DescribeHostReservationsResult; -import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; -import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; -import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; -import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; import com.amazonaws.services.ec2.model.DescribeHostsRequest; import com.amazonaws.services.ec2.model.DescribeHostsResult; import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsRequest; import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsResult; import com.amazonaws.services.ec2.model.DescribeIdFormatRequest; import com.amazonaws.services.ec2.model.DescribeIdFormatResult; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; import com.amazonaws.services.ec2.model.DescribeImageAttributeRequest; import com.amazonaws.services.ec2.model.DescribeImageAttributeResult; import com.amazonaws.services.ec2.model.DescribeImagesRequest; @@ -251,6 +344,8 @@ import com.amazonaws.services.ec2.model.DescribeImportSnapshotTasksResult; import com.amazonaws.services.ec2.model.DescribeInstanceAttributeRequest; import com.amazonaws.services.ec2.model.DescribeInstanceAttributeResult; +import com.amazonaws.services.ec2.model.DescribeInstanceCreditSpecificationsRequest; +import 
com.amazonaws.services.ec2.model.DescribeInstanceCreditSpecificationsResult; import com.amazonaws.services.ec2.model.DescribeInstanceStatusRequest; import com.amazonaws.services.ec2.model.DescribeInstanceStatusResult; import com.amazonaws.services.ec2.model.DescribeInstancesRequest; @@ -259,6 +354,10 @@ import com.amazonaws.services.ec2.model.DescribeInternetGatewaysResult; import com.amazonaws.services.ec2.model.DescribeKeyPairsRequest; import com.amazonaws.services.ec2.model.DescribeKeyPairsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult; import com.amazonaws.services.ec2.model.DescribeMovingAddressesRequest; import com.amazonaws.services.ec2.model.DescribeMovingAddressesResult; import com.amazonaws.services.ec2.model.DescribeNatGatewaysRequest; @@ -267,14 +366,18 @@ import com.amazonaws.services.ec2.model.DescribeNetworkAclsResult; import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeRequest; import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeResult; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsRequest; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsResult; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; import com.amazonaws.services.ec2.model.DescribePlacementGroupsRequest; import com.amazonaws.services.ec2.model.DescribePlacementGroupsResult; import com.amazonaws.services.ec2.model.DescribePrefixListsRequest; import com.amazonaws.services.ec2.model.DescribePrefixListsResult; +import com.amazonaws.services.ec2.model.DescribePrincipalIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribePrincipalIdFormatResult; +import com.amazonaws.services.ec2.model.DescribePublicIpv4PoolsRequest; +import com.amazonaws.services.ec2.model.DescribePublicIpv4PoolsResult; import com.amazonaws.services.ec2.model.DescribeRegionsRequest; import com.amazonaws.services.ec2.model.DescribeRegionsResult; import com.amazonaws.services.ec2.model.DescribeReservedInstancesListingsRequest; @@ -291,12 +394,10 @@ import com.amazonaws.services.ec2.model.DescribeScheduledInstanceAvailabilityResult; import com.amazonaws.services.ec2.model.DescribeScheduledInstancesRequest; import com.amazonaws.services.ec2.model.DescribeScheduledInstancesResult; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; -import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult; import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesRequest; import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeRequest; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeResult; import 
com.amazonaws.services.ec2.model.DescribeSnapshotsRequest;
@@ -313,10 +414,20 @@
 import com.amazonaws.services.ec2.model.DescribeSpotInstanceRequestsResult;
 import com.amazonaws.services.ec2.model.DescribeSpotPriceHistoryRequest;
 import com.amazonaws.services.ec2.model.DescribeSpotPriceHistoryResult;
+import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest;
+import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult;
 import com.amazonaws.services.ec2.model.DescribeSubnetsRequest;
 import com.amazonaws.services.ec2.model.DescribeSubnetsResult;
 import com.amazonaws.services.ec2.model.DescribeTagsRequest;
 import com.amazonaws.services.ec2.model.DescribeTagsResult;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewayAttachmentsRequest;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewayAttachmentsResult;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewayRouteTablesRequest;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewayRouteTablesResult;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewayVpcAttachmentsRequest;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewayVpcAttachmentsResult;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewaysRequest;
+import com.amazonaws.services.ec2.model.DescribeTransitGatewaysResult;
 import com.amazonaws.services.ec2.model.DescribeVolumeAttributeRequest;
 import com.amazonaws.services.ec2.model.DescribeVolumeAttributeResult;
 import com.amazonaws.services.ec2.model.DescribeVolumeStatusRequest;
@@ -331,6 +442,14 @@
 import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkDnsSupportResult;
 import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkRequest;
 import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionNotificationsRequest;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionNotificationsResult;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionsRequest;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionsResult;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointServiceConfigurationsRequest;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointServiceConfigurationsResult;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicePermissionsRequest;
+import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicePermissionsResult;
 import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicesRequest;
 import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicesResult;
 import com.amazonaws.services.ec2.model.DescribeVpcEndpointsRequest;
@@ -353,6 +472,8 @@
 import com.amazonaws.services.ec2.model.DetachVolumeResult;
 import com.amazonaws.services.ec2.model.DetachVpnGatewayRequest;
 import com.amazonaws.services.ec2.model.DetachVpnGatewayResult;
+import com.amazonaws.services.ec2.model.DisableTransitGatewayRouteTablePropagationRequest;
+import com.amazonaws.services.ec2.model.DisableTransitGatewayRouteTablePropagationResult;
 import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationRequest;
 import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationResult;
 import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportRequest;
@@ -361,16 +482,22 @@
 import com.amazonaws.services.ec2.model.DisableVpcClassicLinkResult;
 import com.amazonaws.services.ec2.model.DisassociateAddressRequest;
 import com.amazonaws.services.ec2.model.DisassociateAddressResult;
-import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest;
-import com.amazonaws.services.ec2.model.DisassociateRouteTableResult;
+import com.amazonaws.services.ec2.model.DisassociateClientVpnTargetNetworkRequest;
+import com.amazonaws.services.ec2.model.DisassociateClientVpnTargetNetworkResult;
 import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileRequest;
 import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileResult;
-import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest;
-import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult;
+import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest;
+import com.amazonaws.services.ec2.model.DisassociateRouteTableResult;
 import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockRequest;
 import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockResult;
+import com.amazonaws.services.ec2.model.DisassociateTransitGatewayRouteTableRequest;
+import com.amazonaws.services.ec2.model.DisassociateTransitGatewayRouteTableResult;
+import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest;
+import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult;
 import com.amazonaws.services.ec2.model.DryRunResult;
 import com.amazonaws.services.ec2.model.DryRunSupportedRequest;
+import com.amazonaws.services.ec2.model.EnableTransitGatewayRouteTablePropagationRequest;
+import com.amazonaws.services.ec2.model.EnableTransitGatewayRouteTablePropagationResult;
 import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationRequest;
 import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationResult;
 import com.amazonaws.services.ec2.model.EnableVolumeIORequest;
@@ -379,6 +506,12 @@
 import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportResult;
 import com.amazonaws.services.ec2.model.EnableVpcClassicLinkRequest;
 import com.amazonaws.services.ec2.model.EnableVpcClassicLinkResult;
+import com.amazonaws.services.ec2.model.ExportClientVpnClientCertificateRevocationListRequest;
+import com.amazonaws.services.ec2.model.ExportClientVpnClientCertificateRevocationListResult;
+import com.amazonaws.services.ec2.model.ExportClientVpnClientConfigurationRequest;
+import com.amazonaws.services.ec2.model.ExportClientVpnClientConfigurationResult;
+import com.amazonaws.services.ec2.model.ExportTransitGatewayRoutesRequest;
+import com.amazonaws.services.ec2.model.ExportTransitGatewayRoutesResult;
 import com.amazonaws.services.ec2.model.Filter;
 import com.amazonaws.services.ec2.model.GetConsoleOutputRequest;
 import com.amazonaws.services.ec2.model.GetConsoleOutputResult;
@@ -386,10 +519,20 @@
 import com.amazonaws.services.ec2.model.GetConsoleScreenshotResult;
 import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewRequest;
 import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewResult;
+import com.amazonaws.services.ec2.model.GetLaunchTemplateDataRequest;
+import com.amazonaws.services.ec2.model.GetLaunchTemplateDataResult;
 import com.amazonaws.services.ec2.model.GetPasswordDataRequest;
 import com.amazonaws.services.ec2.model.GetPasswordDataResult;
 import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteRequest;
 import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteResult;
+import com.amazonaws.services.ec2.model.GetTransitGatewayAttachmentPropagationsRequest;
+import com.amazonaws.services.ec2.model.GetTransitGatewayAttachmentPropagationsResult;
+import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTableAssociationsRequest;
+import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTableAssociationsResult;
+import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTablePropagationsRequest;
+import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTablePropagationsResult;
+import com.amazonaws.services.ec2.model.ImportClientVpnClientCertificateRevocationListRequest;
+import com.amazonaws.services.ec2.model.ImportClientVpnClientCertificateRevocationListResult;
 import com.amazonaws.services.ec2.model.ImportImageRequest;
 import com.amazonaws.services.ec2.model.ImportImageResult;
 import com.amazonaws.services.ec2.model.ImportInstanceRequest;
@@ -403,18 +546,32 @@
 import com.amazonaws.services.ec2.model.Instance;
 import com.amazonaws.services.ec2.model.InstanceState;
 import com.amazonaws.services.ec2.model.InstanceStateName;
+import com.amazonaws.services.ec2.model.ModifyCapacityReservationRequest;
+import com.amazonaws.services.ec2.model.ModifyCapacityReservationResult;
+import com.amazonaws.services.ec2.model.ModifyClientVpnEndpointRequest;
+import com.amazonaws.services.ec2.model.ModifyClientVpnEndpointResult;
+import com.amazonaws.services.ec2.model.ModifyFleetRequest;
+import com.amazonaws.services.ec2.model.ModifyFleetResult;
+import com.amazonaws.services.ec2.model.ModifyFpgaImageAttributeRequest;
+import com.amazonaws.services.ec2.model.ModifyFpgaImageAttributeResult;
 import com.amazonaws.services.ec2.model.ModifyHostsRequest;
 import com.amazonaws.services.ec2.model.ModifyHostsResult;
 import com.amazonaws.services.ec2.model.ModifyIdFormatRequest;
 import com.amazonaws.services.ec2.model.ModifyIdFormatResult;
+import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest;
+import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult;
 import com.amazonaws.services.ec2.model.ModifyImageAttributeRequest;
 import com.amazonaws.services.ec2.model.ModifyImageAttributeResult;
 import com.amazonaws.services.ec2.model.ModifyInstanceAttributeRequest;
 import com.amazonaws.services.ec2.model.ModifyInstanceAttributeResult;
+import com.amazonaws.services.ec2.model.ModifyInstanceCapacityReservationAttributesRequest;
+import com.amazonaws.services.ec2.model.ModifyInstanceCapacityReservationAttributesResult;
+import com.amazonaws.services.ec2.model.ModifyInstanceCreditSpecificationRequest;
+import com.amazonaws.services.ec2.model.ModifyInstanceCreditSpecificationResult;
 import com.amazonaws.services.ec2.model.ModifyInstancePlacementRequest;
 import com.amazonaws.services.ec2.model.ModifyInstancePlacementResult;
-import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest;
-import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult;
+import com.amazonaws.services.ec2.model.ModifyLaunchTemplateRequest;
+import com.amazonaws.services.ec2.model.ModifyLaunchTemplateResult;
 import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeRequest;
 import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeResult;
 import com.amazonaws.services.ec2.model.ModifyReservedInstancesRequest;
@@ -425,32 +582,48 @@
 import com.amazonaws.services.ec2.model.ModifySpotFleetRequestResult;
 import com.amazonaws.services.ec2.model.ModifySubnetAttributeRequest;
 import com.amazonaws.services.ec2.model.ModifySubnetAttributeResult;
+import com.amazonaws.services.ec2.model.ModifyTransitGatewayVpcAttachmentRequest;
+import com.amazonaws.services.ec2.model.ModifyTransitGatewayVpcAttachmentResult;
 import com.amazonaws.services.ec2.model.ModifyVolumeAttributeRequest;
 import com.amazonaws.services.ec2.model.ModifyVolumeAttributeResult;
 import com.amazonaws.services.ec2.model.ModifyVolumeRequest;
 import com.amazonaws.services.ec2.model.ModifyVolumeResult;
 import com.amazonaws.services.ec2.model.ModifyVpcAttributeRequest;
 import com.amazonaws.services.ec2.model.ModifyVpcAttributeResult;
+import com.amazonaws.services.ec2.model.ModifyVpcEndpointConnectionNotificationRequest;
+import com.amazonaws.services.ec2.model.ModifyVpcEndpointConnectionNotificationResult;
 import com.amazonaws.services.ec2.model.ModifyVpcEndpointRequest;
 import com.amazonaws.services.ec2.model.ModifyVpcEndpointResult;
+import com.amazonaws.services.ec2.model.ModifyVpcEndpointServiceConfigurationRequest;
+import com.amazonaws.services.ec2.model.ModifyVpcEndpointServiceConfigurationResult;
+import com.amazonaws.services.ec2.model.ModifyVpcEndpointServicePermissionsRequest;
+import com.amazonaws.services.ec2.model.ModifyVpcEndpointServicePermissionsResult;
+import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest;
+import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult;
+import com.amazonaws.services.ec2.model.ModifyVpcTenancyRequest;
+import com.amazonaws.services.ec2.model.ModifyVpcTenancyResult;
 import com.amazonaws.services.ec2.model.MonitorInstancesRequest;
 import com.amazonaws.services.ec2.model.MonitorInstancesResult;
 import com.amazonaws.services.ec2.model.MoveAddressToVpcRequest;
 import com.amazonaws.services.ec2.model.MoveAddressToVpcResult;
+import com.amazonaws.services.ec2.model.ProvisionByoipCidrRequest;
+import com.amazonaws.services.ec2.model.ProvisionByoipCidrResult;
+import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest;
+import com.amazonaws.services.ec2.model.PurchaseHostReservationResult;
 import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingRequest;
 import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingResult;
 import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesRequest;
 import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesResult;
-import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest;
-import com.amazonaws.services.ec2.model.PurchaseHostReservationResult;
 import com.amazonaws.services.ec2.model.RebootInstancesRequest;
 import com.amazonaws.services.ec2.model.RebootInstancesResult;
 import com.amazonaws.services.ec2.model.RegisterImageRequest;
 import com.amazonaws.services.ec2.model.RegisterImageResult;
+import com.amazonaws.services.ec2.model.RejectTransitGatewayVpcAttachmentRequest;
+import com.amazonaws.services.ec2.model.RejectTransitGatewayVpcAttachmentResult;
+import com.amazonaws.services.ec2.model.RejectVpcEndpointConnectionsRequest;
+import com.amazonaws.services.ec2.model.RejectVpcEndpointConnectionsResult;
 import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionRequest;
 import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionResult;
-import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest;
-import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult;
 import com.amazonaws.services.ec2.model.ReleaseAddressRequest;
 import com.amazonaws.services.ec2.model.ReleaseAddressResult;
 import com.amazonaws.services.ec2.model.ReleaseHostsRequest;
@@ -465,6 +638,8 @@
 import com.amazonaws.services.ec2.model.ReplaceRouteResult;
 import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationRequest;
 import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationResult;
+import com.amazonaws.services.ec2.model.ReplaceTransitGatewayRouteRequest;
+import com.amazonaws.services.ec2.model.ReplaceTransitGatewayRouteResult;
 import com.amazonaws.services.ec2.model.ReportInstanceStatusRequest;
 import com.amazonaws.services.ec2.model.ReportInstanceStatusResult;
 import com.amazonaws.services.ec2.model.RequestSpotFleetRequest;
@@ -472,6 +647,8 @@
 import com.amazonaws.services.ec2.model.RequestSpotInstancesRequest;
 import com.amazonaws.services.ec2.model.RequestSpotInstancesResult;
 import com.amazonaws.services.ec2.model.Reservation;
+import com.amazonaws.services.ec2.model.ResetFpgaImageAttributeRequest;
+import com.amazonaws.services.ec2.model.ResetFpgaImageAttributeResult;
 import com.amazonaws.services.ec2.model.ResetImageAttributeRequest;
 import com.amazonaws.services.ec2.model.ResetImageAttributeResult;
 import com.amazonaws.services.ec2.model.ResetInstanceAttributeRequest;
@@ -482,6 +659,8 @@
 import com.amazonaws.services.ec2.model.ResetSnapshotAttributeResult;
 import com.amazonaws.services.ec2.model.RestoreAddressToClassicRequest;
 import com.amazonaws.services.ec2.model.RestoreAddressToClassicResult;
+import com.amazonaws.services.ec2.model.RevokeClientVpnIngressRequest;
+import com.amazonaws.services.ec2.model.RevokeClientVpnIngressResult;
 import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressRequest;
 import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressResult;
 import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest;
@@ -490,11 +669,15 @@
 import com.amazonaws.services.ec2.model.RunInstancesResult;
 import com.amazonaws.services.ec2.model.RunScheduledInstancesRequest;
 import com.amazonaws.services.ec2.model.RunScheduledInstancesResult;
+import com.amazonaws.services.ec2.model.SearchTransitGatewayRoutesRequest;
+import com.amazonaws.services.ec2.model.SearchTransitGatewayRoutesResult;
 import com.amazonaws.services.ec2.model.StartInstancesRequest;
 import com.amazonaws.services.ec2.model.StartInstancesResult;
 import com.amazonaws.services.ec2.model.StopInstancesRequest;
 import com.amazonaws.services.ec2.model.StopInstancesResult;
 import com.amazonaws.services.ec2.model.Tag;
+import com.amazonaws.services.ec2.model.TerminateClientVpnConnectionsRequest;
+import com.amazonaws.services.ec2.model.TerminateClientVpnConnectionsResult;
 import com.amazonaws.services.ec2.model.TerminateInstancesRequest;
 import com.amazonaws.services.ec2.model.TerminateInstancesResult;
 import com.amazonaws.services.ec2.model.UnassignIpv6AddressesRequest;
@@ -507,9 +690,11 @@
 import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressResult;
 import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressRequest;
 import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressResult;
+import com.amazonaws.services.ec2.model.WithdrawByoipCidrRequest;
+import com.amazonaws.services.ec2.model.WithdrawByoipCidrResult;
 import com.amazonaws.services.ec2.waiters.AmazonEC2Waiters;
-import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -563,7 +748,7 @@ public AmazonEC2Mock(int nodes, List<List<Tag>> tagsList, AWSCredentialsProvider
 
     @Override
     public DescribeInstancesResult describeInstances(DescribeInstancesRequest describeInstancesRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
         Collection<Instance> filteredInstances = new ArrayList<>();
         logger.debug("--> mocking describeInstances");
 
@@ -660,49 +845,77 @@ public void setRegion(Region region) throws IllegalArgumentException {
 
     @Override
     public AcceptReservedInstancesExchangeQuoteResult acceptReservedInstancesExchangeQuote(
        AcceptReservedInstancesExchangeQuoteRequest acceptReservedInstancesExchangeQuoteRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
+        throw new UnsupportedOperationException("Not supported in mock");
+    }
+
+    @Override
+    public AcceptTransitGatewayVpcAttachmentResult acceptTransitGatewayVpcAttachment(
+        AcceptTransitGatewayVpcAttachmentRequest acceptTransitGatewayVpcAttachmentRequest) {
+        throw new UnsupportedOperationException("Not supported in mock");
+    }
+
+    @Override
+    public AcceptVpcEndpointConnectionsResult acceptVpcEndpointConnections(
+        AcceptVpcEndpointConnectionsRequest acceptVpcEndpointConnectionsRequest) {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public RebootInstancesResult rebootInstances(RebootInstancesRequest rebootInstancesRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public DescribeReservedInstancesResult describeReservedInstances(
        DescribeReservedInstancesRequest describeReservedInstancesRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public CreateFlowLogsResult createFlowLogs(CreateFlowLogsRequest createFlowLogsRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public DescribeAvailabilityZonesResult describeAvailabilityZones(DescribeAvailabilityZonesRequest describeAvailabilityZonesRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public RestoreAddressToClassicResult restoreAddressToClassic(RestoreAddressToClassicRequest restoreAddressToClassicRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
+        throw new UnsupportedOperationException("Not supported in mock");
+    }
+
+    @Override
+    public RevokeClientVpnIngressResult revokeClientVpnIngress(RevokeClientVpnIngressRequest revokeClientVpnIngressRequest) {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
-    public DetachVolumeResult detachVolume(DetachVolumeRequest detachVolumeRequest) throws AmazonServiceException, AmazonClientException {
+    public DetachVolumeResult detachVolume(DetachVolumeRequest detachVolumeRequest) throws AmazonClientException {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public DeleteKeyPairResult deleteKeyPair(DeleteKeyPairRequest deleteKeyPairRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
+        throw new UnsupportedOperationException("Not supported in mock");
+    }
+
+    @Override
+    public DeleteLaunchTemplateResult deleteLaunchTemplate(DeleteLaunchTemplateRequest deleteLaunchTemplateRequest) {
+        throw new
UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteLaunchTemplateVersionsResult deleteLaunchTemplateVersions( + DeleteLaunchTemplateVersionsRequest deleteLaunchTemplateVersionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -713,171 +926,231 @@ public DeleteNatGatewayResult deleteNatGateway(DeleteNatGatewayRequest deleteNat @Override public UnmonitorInstancesResult unmonitorInstances(UnmonitorInstancesRequest unmonitorInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UpdateSecurityGroupRuleDescriptionsIngressResult updateSecurityGroupRuleDescriptionsIngress( UpdateSecurityGroupRuleDescriptionsIngressRequest updateSecurityGroupRuleDescriptionsIngressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public WithdrawByoipCidrResult withdrawByoipCidr(WithdrawByoipCidrRequest withdrawByoipCidrRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UpdateSecurityGroupRuleDescriptionsEgressResult updateSecurityGroupRuleDescriptionsEgress( UpdateSecurityGroupRuleDescriptionsEgressRequest updateSecurityGroupRuleDescriptionsEgressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachVpnGatewayResult attachVpnGateway(AttachVpnGatewayRequest attachVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AuthorizeClientVpnIngressResult authorizeClientVpnIngress(AuthorizeClientVpnIngressRequest authorizeClientVpnIngressRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateImageResult createImage(CreateImageRequest createImageRequest) throws AmazonServiceException, AmazonClientException { + public CreateImageResult createImage(CreateImageRequest createImageRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSecurityGroupResult deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateInstanceExportTaskResult createInstanceExportTask(CreateInstanceExportTaskRequest createInstanceExportTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AuthorizeSecurityGroupEgressResult authorizeSecurityGroupEgress( - AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { + AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateDhcpOptionsResult associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws 
AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetPasswordDataResult getPasswordData(GetPasswordDataRequest getPasswordDataRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetReservedInstancesExchangeQuoteResult getReservedInstancesExchangeQuote( GetReservedInstancesExchangeQuoteRequest getReservedInstancesExchangeQuoteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetTransitGatewayAttachmentPropagationsResult getTransitGatewayAttachmentPropagations( + GetTransitGatewayAttachmentPropagationsRequest getTransitGatewayAttachmentPropagationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetTransitGatewayRouteTableAssociationsResult getTransitGatewayRouteTableAssociations( + GetTransitGatewayRouteTableAssociationsRequest getTransitGatewayRouteTableAssociationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetTransitGatewayRouteTablePropagationsResult getTransitGatewayRouteTablePropagations( + GetTransitGatewayRouteTablePropagationsRequest getTransitGatewayRouteTablePropagationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ImportClientVpnClientCertificateRevocationListResult importClientVpnClientCertificateRevocationList( + ImportClientVpnClientCertificateRevocationListRequest importClientVpnClientCertificateRevocationListRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public StopInstancesResult stopInstances(StopInstancesRequest stopInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public TerminateClientVpnConnectionsResult terminateClientVpnConnections( + TerminateClientVpnConnectionsRequest terminateClientVpnConnectionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ImportKeyPairResult importKeyPair(ImportKeyPairRequest importKeyPairRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkInterfaceResult deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVpcAttributeResult modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotFleetInstancesResult describeSpotFleetInstances(DescribeSpotFleetInstancesRequest describeSpotFleetInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateSecurityGroupResult createSecurityGroup(CreateSecurityGroupRequest 
createSecurityGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotPriceHistoryResult describeSpotPriceHistory(DescribeSpotPriceHistoryRequest describeSpotPriceHistoryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkInterfacesResult describeNetworkInterfaces(DescribeNetworkInterfacesRequest describeNetworkInterfacesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkInterfacePermissionsResult describeNetworkInterfacePermissions( DescribeNetworkInterfacePermissionsRequest describeNetworkInterfacePermissionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeRegionsResult describeRegions(DescribeRegionsRequest describeRegionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateDhcpOptionsResult createDhcpOptions(CreateDhcpOptionsRequest createDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateReservedInstancesListingResult createReservedInstancesListing( CreateReservedInstancesListingRequest createReservedInstancesListingRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpcEndpointsResult deleteVpcEndpoints(DeleteVpcEndpointsRequest deleteVpcEndpointsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ResetSnapshotAttributeResult resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonServiceException, AmazonClientException { + public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeInternetGatewaysResult describeInternetGateways(DescribeInternetGatewaysRequest describeInternetGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ImportVolumeResult importVolume(ImportVolumeRequest importVolumeRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyCapacityReservationResult modifyCapacityReservation(ModifyCapacityReservationRequest modifyCapacityReservationRequest) { throw new UnsupportedOperationException("Not 
supported in mock"); } @Override - public ImportVolumeResult importVolume(ImportVolumeRequest importVolumeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyClientVpnEndpointResult modifyClientVpnEndpoint(ModifyClientVpnEndpointRequest modifyClientVpnEndpointRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyFleetResult modifyFleet(ModifyFleetRequest modifyFleetRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyFpgaImageAttributeResult modifyFpgaImageAttribute(ModifyFpgaImageAttributeRequest modifyFpgaImageAttributeRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -893,514 +1166,599 @@ public ModifyIdFormatResult modifyIdFormat(ModifyIdFormatRequest modifyIdFormatR @Override public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroupsRequest describeSecurityGroupsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeStaleSecurityGroupsResult describeStaleSecurityGroups( DescribeStaleSecurityGroupsRequest describeStaleSecurityGroupsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSecurityGroupReferencesResult describeSecurityGroupReferences( DescribeSecurityGroupReferencesRequest describeSecurityGroupReferencesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RejectVpcPeeringConnectionResult rejectVpcPeeringConnection( RejectVpcPeeringConnectionRequest rejectVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVpcPeeringConnectionOptionsResult modifyVpcPeeringConnectionOptions( ModifyVpcPeeringConnectionOptionsRequest modifyVpcPeeringConnectionOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcTenancyResult modifyVpcTenancy(ModifyVpcTenancyRequest modifyVpcTenancyRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteFlowLogsResult deleteFlowLogs(DeleteFlowLogsRequest deleteFlowLogsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteFpgaImageResult deleteFpgaImage(DeleteFpgaImageRequest deleteFpgaImageRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachVpnGatewayResult detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisableTransitGatewayRouteTablePropagationResult disableTransitGatewayRouteTablePropagation( + DisableTransitGatewayRouteTablePropagationRequest disableTransitGatewayRouteTablePropagationRequest) { throw new 
UnsupportedOperationException("Not supported in mock"); } @Override public DeregisterImageResult deregisterImage(DeregisterImageRequest deregisterImageRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription( DescribeSpotDatafeedSubscriptionRequest describeSpotDatafeedSubscriptionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayResult deleteTransitGateway(DeleteTransitGatewayRequest deleteTransitGatewayRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonServiceException, AmazonClientException { + public DeleteTransitGatewayRouteResult deleteTransitGatewayRoute(DeleteTransitGatewayRouteRequest deleteTransitGatewayRouteRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayRouteTableResult deleteTransitGatewayRouteTable( + DeleteTransitGatewayRouteTableRequest deleteTransitGatewayRouteTableRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayVpcAttachmentResult deleteTransitGatewayVpcAttachment( + DeleteTransitGatewayVpcAttachmentRequest deleteTransitGatewayVpcAttachmentRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSubnetResult deleteSubnet(DeleteSubnetRequest deleteSubnetRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeAccountAttributesResult describeAccountAttributes(DescribeAccountAttributesRequest describeAccountAttributesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachClassicLinkVpcResult attachClassicLinkVpc(AttachClassicLinkVpcRequest attachClassicLinkVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpnGatewayResult createVpnGateway(CreateVpnGatewayRequest createVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteClientVpnEndpointResult deleteClientVpnEndpoint(DeleteClientVpnEndpointRequest deleteClientVpnEndpointRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteClientVpnRouteResult deleteClientVpnRoute(DeleteClientVpnRouteRequest deleteClientVpnRouteRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public EnableVolumeIOResult enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) - throws AmazonServiceException, AmazonClientException { + 
throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public MoveAddressToVpcResult moveAddressToVpc(MoveAddressToVpcRequest moveAddressToVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ProvisionByoipCidrResult provisionByoipCidr(ProvisionByoipCidrRequest provisionByoipCidrRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpnGatewayResult deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public AttachVolumeResult attachVolume(AttachVolumeRequest attachVolumeRequest) throws AmazonServiceException, AmazonClientException { + public DeprovisionByoipCidrResult deprovisionByoipCidr(DeprovisionByoipCidrRequest deprovisionByoipCidrRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AttachVolumeResult attachVolume(AttachVolumeRequest attachVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumeStatusResult describeVolumeStatus(DescribeVolumeStatusRequest describeVolumeStatusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumesModificationsResult describeVolumesModifications( DescribeVolumesModificationsRequest describeVolumesModificationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImportSnapshotTasksResult describeImportSnapshotTasks( DescribeImportSnapshotTasksRequest describeImportSnapshotTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpnConnectionsResult describeVpnConnections(DescribeVpnConnectionsRequest describeVpnConnectionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ResetImageAttributeResult resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public EnableVgwRoutePropagationResult enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateSnapshotResult createSnapshot(CreateSnapshotRequest createSnapshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteVolumeResult deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVolumeResult 
deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkInterfaceResult createNetworkInterface(CreateNetworkInterfaceRequest createNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyReservedInstancesResult modifyReservedInstances(ModifyReservedInstancesRequest modifyReservedInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelSpotFleetRequestsResult cancelSpotFleetRequests(CancelSpotFleetRequestsRequest cancelSpotFleetRequestsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UnassignPrivateIpAddressesResult unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UnassignIpv6AddressesResult unassignIpv6Addresses(UnassignIpv6AddressesRequest unassignIpv6AddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) throws AmazonServiceException, AmazonClientException { + public DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelConversionTaskResult cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateAddressResult associateAddress(AssociateAddressRequest associateAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateClientVpnTargetNetworkResult associateClientVpnTargetNetwork( + AssociateClientVpnTargetNetworkRequest associateClientVpnTargetNetworkRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateIamInstanceProfileResult associateIamInstanceProfile(AssociateIamInstanceProfileRequest associateIamInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateVpcCidrBlockResult associateVpcCidrBlock(AssociateVpcCidrBlockRequest associateVpcCidrBlockRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateSubnetCidrBlockResult associateSubnetCidrBlock(AssociateSubnetCidrBlockRequest associateSubnetCidrBlockRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw 
new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateTransitGatewayRouteTableResult associateTransitGatewayRouteTable( + AssociateTransitGatewayRouteTableRequest associateTransitGatewayRouteTableRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteCustomerGatewayResult deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkAclEntryResult createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection(AcceptVpcPeeringConnectionRequest acceptVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeExportTasksResult describeExportTasks(DescribeExportTasksRequest describeExportTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeElasticGpusResult describeElasticGpus(DescribeElasticGpusRequest describeElasticGpusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeFpgaImagesResult describeFpgaImages(DescribeFpgaImagesRequest describeFpgaImagesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeHostReservationOfferingsResult describeHostReservationOfferings( DescribeHostReservationOfferingsRequest describeHostReservationOfferingsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeHostReservationsResult describeHostReservations(DescribeHostReservationsRequest describeHostReservationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeIdentityIdFormatResult describeIdentityIdFormat(DescribeIdentityIdFormatRequest describeIdentityIdFormatRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachInternetGatewayResult detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpcPeeringConnectionResult createVpcPeeringConnection(CreateVpcPeeringConnectionRequest createVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public 
CreateRouteTableResult createRouteTable(CreateRouteTableRequest createRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelImportTaskResult cancelImportTask(CancelImportTaskRequest cancelImportTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumesResult describeVolumes(DescribeVolumesRequest describeVolumesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesListingsResult describeReservedInstancesListings( DescribeReservedInstancesListingsRequest describeReservedInstancesListingsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReportInstanceStatusResult reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeRouteTablesResult describeRouteTables(DescribeRouteTablesRequest describeRouteTablesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeDhcpOptionsResult describeDhcpOptions(DescribeDhcpOptionsRequest describeDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeEgressOnlyInternetGatewaysResult describeEgressOnlyInternetGateways( DescribeEgressOnlyInternetGatewaysRequest describeEgressOnlyInternetGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public MonitorInstancesResult monitorInstances(MonitorInstancesRequest monitorInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribePrefixListsResult describePrefixLists(DescribePrefixListsRequest describePrefixListsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RequestSpotFleetResult requestSpotFleet(RequestSpotFleetRequest requestSpotFleetRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImportImageTasksResult describeImportImageTasks(DescribeImportImageTasksRequest describeImportImageTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkAclsResult describeNetworkAcls(DescribeNetworkAclsRequest describeNetworkAclsRequest) - throws AmazonServiceException, AmazonClientException { + 
throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeBundleTasksResult describeBundleTasks(DescribeBundleTasksRequest describeBundleTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ImportInstanceResult importInstance(ImportInstanceRequest importInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpcPeeringConnectionResult deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest deleteVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetConsoleScreenshotResult getConsoleScreenshot(GetConsoleScreenshotRequest getConsoleScreenshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetHostReservationPurchasePreviewResult getHostReservationPurchasePreview( GetHostReservationPurchasePreviewRequest getHostReservationPurchasePreviewRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetLaunchTemplateDataResult getLaunchTemplateData(GetLaunchTemplateDataRequest getLaunchTemplateDataRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateInternetGatewayResult createInternetGateway(CreateInternetGatewayRequest createInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpnConnectionRouteResult deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachNetworkInterfaceResult detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyImageAttributeResult modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateCustomerGatewayResult createCustomerGateway(CreateCustomerGatewayRequest createCustomerGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateDefaultSubnetResult createDefaultSubnet(CreateDefaultSubnetRequest 
createDefaultSubnetRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateEgressOnlyInternetGatewayResult createEgressOnlyInternetGateway( CreateEgressOnlyInternetGatewayRequest createEgressOnlyInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateFleetResult createFleet(CreateFleetRequest createFleetRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateFpgaImageResult createFpgaImage(CreateFpgaImageRequest createFpgaImageRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkInterfacePermissionResult createNetworkInterfacePermission( CreateNetworkInterfacePermissionRequest createNetworkInterfacePermissionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateDefaultVpcResult createDefaultVpc(CreateDefaultVpcRequest createDefaultVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateSpotDatafeedSubscriptionResult createSpotDatafeedSubscription( CreateSpotDatafeedSubscriptionRequest createSpotDatafeedSubscriptionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachInternetGatewayResult attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpnConnectionResult deleteVpnConnection(DeleteVpnConnectionRequest deleteVpnConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeMovingAddressesResult describeMovingAddresses(DescribeMovingAddressesRequest describeMovingAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeConversionTasksResult describeConversionTasks(DescribeConversionTasksRequest describeConversionTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpnConnectionResult createVpnConnection(CreateVpnConnectionRequest createVpnConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportImageResult importImage(ImportImageRequest importImageRequest) throws AmazonServiceException, AmazonClientException { + public ImportImageResult importImage(ImportImageRequest importImageRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisableVpcClassicLinkResult 
disableVpcClassicLink(DisableVpcClassicLinkRequest disableVpcClassicLinkRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1412,31 +1770,37 @@ public DisableVpcClassicLinkDnsSupportResult disableVpcClassicLinkDnsSupport( @Override public DescribeInstanceAttributeResult describeInstanceAttribute(DescribeInstanceAttributeRequest describeInstanceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeInstanceCreditSpecificationsResult describeInstanceCreditSpecifications( + DescribeInstanceCreditSpecificationsRequest describeInstanceCreditSpecificationsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeFlowLogsResult describeFlowLogs(DescribeFlowLogsRequest describeFlowLogsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections( DescribeVpcPeeringConnectionsRequest describeVpcPeeringConnectionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribePlacementGroupsResult describePlacementGroups(DescribePlacementGroupsRequest describePlacementGroupsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public RunInstancesResult runInstances(RunInstancesRequest runInstancesRequest) throws AmazonServiceException, AmazonClientException { + public RunInstancesResult runInstances(RunInstancesRequest runInstancesRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1445,45 +1809,63 @@ public RunScheduledInstancesResult runScheduledInstances(RunScheduledInstancesRe throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public SearchTransitGatewayRoutesResult searchTransitGatewayRoutes( + SearchTransitGatewayRoutesRequest searchTransitGatewayRoutesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeSubnetsResult describeSubnets(DescribeSubnetsRequest describeSubnetsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateRouteTableResult associateRouteTable(AssociateRouteTableRequest associateRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVolumeAttributeResult modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkAclResult deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new 
UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImagesResult describeImages(DescribeImagesRequest describeImagesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public StartInstancesResult startInstances(StartInstancesRequest startInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyInstanceAttributeResult modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyInstanceCapacityReservationAttributesResult modifyInstanceCapacityReservationAttributes( + ModifyInstanceCapacityReservationAttributesRequest modifyInstanceCapacityReservationAttributesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyInstanceCreditSpecificationResult modifyInstanceCreditSpecification( + ModifyInstanceCreditSpecificationRequest modifyInstanceCreditSpecificationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1492,6 +1874,11 @@ public ModifyInstancePlacementResult modifyInstancePlacement(ModifyInstancePlace throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyLaunchTemplateResult modifyLaunchTemplate(ModifyLaunchTemplateRequest modifyLaunchTemplateRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public ModifyIdentityIdFormatResult modifyIdentityIdFormat(ModifyIdentityIdFormatRequest modifyIdentityIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); @@ -1500,175 +1887,224 @@ public ModifyIdentityIdFormatResult modifyIdentityIdFormat(ModifyIdentityIdForma @Override public CancelReservedInstancesListingResult cancelReservedInstancesListing( CancelReservedInstancesListingRequest cancelReservedInstancesListingRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteDhcpOptionsResult deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteEgressOnlyInternetGatewayResult deleteEgressOnlyInternetGateway( DeleteEgressOnlyInternetGatewayRequest deleteEgressOnlyInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteFleetsResult deleteFleets(DeleteFleetsRequest deleteFleetsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkInterfacePermissionResult deleteNetworkInterfacePermission( DeleteNetworkInterfacePermissionRequest deleteNetworkInterfacePermissionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public 
AuthorizeSecurityGroupIngressResult authorizeSecurityGroupIngress( AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests( - DescribeSpotInstanceRequestsRequest describeSpotInstanceRequestsRequest) throws AmazonServiceException, AmazonClientException { + DescribeSpotInstanceRequestsRequest describeSpotInstanceRequestsRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateVpcResult createVpc(CreateVpcRequest createVpcRequest) throws AmazonServiceException, AmazonClientException { + public CreateVpcResult createVpc(CreateVpcRequest createVpcRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeCustomerGatewaysResult describeCustomerGateways(DescribeCustomerGatewaysRequest describeCustomerGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelExportTaskResult cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateRouteResult createRoute(CreateRouteRequest createRouteRequest) throws AmazonServiceException, AmazonClientException { + public CreateRouteResult createRoute(CreateRouteRequest createRouteRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpcEndpointResult createVpcEndpoint(CreateVpcEndpointRequest createVpcEndpointRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateVpcEndpointConnectionNotificationResult createVpcEndpointConnectionNotification( + CreateVpcEndpointConnectionNotificationRequest createVpcEndpointConnectionNotificationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateVpcEndpointServiceConfigurationResult createVpcEndpointServiceConfiguration( + CreateVpcEndpointServiceConfigurationRequest createVpcEndpointServiceConfigurationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CopyImageResult copyImage(CopyImageRequest copyImageRequest) throws AmazonServiceException, AmazonClientException { + public CopyImageResult copyImage(CopyImageRequest copyImageRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLinkRequest describeVpcClassicLinkRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyNetworkInterfaceAttributeResult modifyNetworkInterfaceAttribute( ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new 
UnsupportedOperationException("Not supported in mock"); } @Override public DeleteRouteTableResult deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkInterfaceAttributeResult describeNetworkInterfaceAttribute( DescribeNetworkInterfaceAttributeRequest describeNetworkInterfaceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeClassicLinkInstancesResult describeClassicLinkInstances( - DescribeClassicLinkInstancesRequest describeClassicLinkInstancesRequest) throws AmazonServiceException, AmazonClientException { + DescribeClassicLinkInstancesRequest describeClassicLinkInstancesRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RequestSpotInstancesResult requestSpotInstances(RequestSpotInstancesRequest requestSpotInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateTagsResult createTags(CreateTagsRequest createTagsRequest) throws AmazonServiceException, AmazonClientException { + public ResetFpgaImageAttributeResult resetFpgaImageAttribute(ResetFpgaImageAttributeRequest resetFpgaImageAttributeRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTagsResult createTags(CreateTagsRequest createTagsRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayResult createTransitGateway(CreateTransitGatewayRequest createTransitGatewayRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayRouteResult createTransitGatewayRoute(CreateTransitGatewayRouteRequest createTransitGatewayRouteRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayRouteTableResult createTransitGatewayRouteTable( + CreateTransitGatewayRouteTableRequest createTransitGatewayRouteTableRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayVpcAttachmentResult createTransitGatewayVpcAttachment( + CreateTransitGatewayVpcAttachmentRequest createTransitGatewayVpcAttachmentRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumeAttributeResult describeVolumeAttribute(DescribeVolumeAttributeRequest describeVolumeAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachNetworkInterfaceResult attachNetworkInterface(AttachNetworkInterfaceRequest attachNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ReplaceRouteResult replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceRouteResult 
replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeTagsResult describeTags(DescribeTagsRequest describeTagsRequest) throws AmazonServiceException, AmazonClientException { + public DescribeTagsResult describeTags(DescribeTagsRequest describeTagsRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelBundleTaskResult cancelBundleTask(CancelBundleTaskRequest cancelBundleTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CancelCapacityReservationResult cancelCapacityReservation(CancelCapacityReservationRequest cancelCapacityReservationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisableVgwRoutePropagationResult disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ImportSnapshotResult importSnapshot(ImportSnapshotRequest importSnapshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelSpotInstanceRequestsResult cancelSpotInstanceRequests(CancelSpotInstanceRequestsRequest cancelSpotInstanceRequestsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotFleetRequestsResult describeSpotFleetRequests(DescribeSpotFleetRequestsRequest describeSpotFleetRequestsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public PurchaseReservedInstancesOfferingResult purchaseReservedInstancesOffering( PurchaseReservedInstancesOfferingRequest purchaseReservedInstancesOfferingRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1685,243 +2121,323 @@ public PurchaseHostReservationResult purchaseHostReservation(PurchaseHostReserva @Override public ModifySnapshotAttributeResult modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesModificationsResult describeReservedInstancesModifications( DescribeReservedInstancesModificationsRequest describeReservedInstancesModificationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public TerminateInstancesResult terminateInstances(TerminateInstancesRequest terminateInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVpcEndpointResult 
modifyVpcEndpoint(ModifyVpcEndpointRequest modifyVpcEndpointRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcEndpointConnectionNotificationResult modifyVpcEndpointConnectionNotification( + ModifyVpcEndpointConnectionNotificationRequest modifyVpcEndpointConnectionNotificationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcEndpointServiceConfigurationResult modifyVpcEndpointServiceConfiguration( + ModifyVpcEndpointServiceConfigurationRequest modifyVpcEndpointServiceConfigurationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcEndpointServicePermissionsResult modifyVpcEndpointServicePermissions( + ModifyVpcEndpointServicePermissionsRequest modifyVpcEndpointServicePermissionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription( DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteInternetGatewayResult deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSnapshotAttributeResult describeSnapshotAttribute(DescribeSnapshotAttributeRequest describeSnapshotAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReplaceRouteTableAssociationResult replaceRouteTableAssociation( - ReplaceRouteTableAssociationRequest replaceRouteTableAssociationRequest) throws AmazonServiceException, AmazonClientException { + ReplaceRouteTableAssociationRequest replaceRouteTableAssociationRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ReplaceTransitGatewayRouteResult replaceTransitGatewayRoute( + ReplaceTransitGatewayRouteRequest replaceTransitGatewayRouteRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeAddressesResult describeAddresses(DescribeAddressesRequest describeAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImageAttributeResult describeImageAttribute(DescribeImageAttributeRequest describeImageAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeKeyPairsResult describeKeyPairs(DescribeKeyPairsRequest describeKeyPairsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ConfirmProductInstanceResult confirmProductInstance(ConfirmProductInstanceRequest confirmProductInstanceRequest) - throws AmazonServiceException, 
AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CopyFpgaImageResult copyFpgaImage(CopyFpgaImageRequest copyFpgaImageRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateRouteTableResult disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateIamInstanceProfileResult disassociateIamInstanceProfile( DisassociateIamInstanceProfileRequest disassociateIamInstanceProfileRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateVpcCidrBlockResult disassociateVpcCidrBlock(DisassociateVpcCidrBlockRequest disassociateVpcCidrBlockRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public EnableTransitGatewayRouteTablePropagationResult enableTransitGatewayRouteTablePropagation( + EnableTransitGatewayRouteTablePropagationRequest enableTransitGatewayRouteTablePropagationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateSubnetCidrBlockResult disassociateSubnetCidrBlock( - DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonServiceException, AmazonClientException { + DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateTransitGatewayRouteTableResult disassociateTransitGatewayRouteTable( + DisassociateTransitGatewayRouteTableRequest disassociateTransitGatewayRouteTableRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcAttributeResult describeVpcAttribute(DescribeVpcAttributeRequest describeVpcAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RevokeSecurityGroupEgressResult revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkAclEntryResult deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throws AmazonServiceException, AmazonClientException { + public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ModifyVolumeResult modifyVolume(ModifyVolumeRequest modifyVolumeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyVolumeResult modifyVolume(ModifyVolumeRequest 
modifyVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeInstanceStatusResult describeInstanceStatus(DescribeInstanceStatusRequest describeInstanceStatusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpnGatewaysResult describeVpnGateways(DescribeVpnGatewaysRequest describeVpnGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateSubnetResult createSubnet(CreateSubnetRequest createSubnetRequest) throws AmazonServiceException, AmazonClientException { + public CreateSubnetResult createSubnet(CreateSubnetRequest createSubnetRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings( DescribeReservedInstancesOfferingsRequest describeReservedInstancesOfferingsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssignPrivateIpAddressesResult assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssignIpv6AddressesResult assignIpv6Addresses(AssignIpv6AddressesRequest assignIpv6AddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotFleetRequestHistoryResult describeSpotFleetRequestHistory( DescribeSpotFleetRequestHistoryRequest describeSpotFleetRequestHistoryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSnapshotResult deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReplaceNetworkAclAssociationResult replaceNetworkAclAssociation( ReplaceNetworkAclAssociationRequest replaceNetworkAclAssociationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateAddressResult disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateClientVpnTargetNetworkResult disassociateClientVpnTargetNetwork( + DisassociateClientVpnTargetNetworkRequest disassociateClientVpnTargetNetworkRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreatePlacementGroupResult createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws 
AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public BundleInstanceResult bundleInstance(BundleInstanceRequest bundleInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeletePlacementGroupResult deletePlacementGroup(DeletePlacementGroupRequest deletePlacementGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifySubnetAttributeResult modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonServiceException, AmazonClientException { + public ModifyTransitGatewayVpcAttachmentResult modifyTransitGatewayVpcAttachment( + ModifyTransitGatewayVpcAttachmentRequest modifyTransitGatewayVpcAttachmentRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CopySnapshotResult copySnapshot(CopySnapshotRequest copySnapshotRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteVpcEndpointConnectionNotificationsResult deleteVpcEndpointConnectionNotifications( + DeleteVpcEndpointConnectionNotificationsRequest deleteVpcEndpointConnectionNotificationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteVpcEndpointServiceConfigurationsResult deleteVpcEndpointServiceConfigurations( + DeleteVpcEndpointServiceConfigurationsRequest deleteVpcEndpointServiceConfigurationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CopySnapshotResult copySnapshot(CopySnapshotRequest copySnapshotRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateCapacityReservationResult createCapacityReservation(CreateCapacityReservationRequest createCapacityReservationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateClientVpnEndpointResult createClientVpnEndpoint(CreateClientVpnEndpointRequest createClientVpnEndpointRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateClientVpnRouteResult createClientVpnRoute(CreateClientVpnRouteRequest createClientVpnRouteRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcEndpointServicesResult describeVpcEndpointServices( DescribeVpcEndpointServicesRequest describeVpcEndpointServicesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AllocateAddressResult allocateAddress(AllocateAddressRequest allocateAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not 
supported in mock"); } @Override public ReleaseAddressResult releaseAddress(ReleaseAddressRequest releaseAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1938,13 +2454,24 @@ public ReplaceIamInstanceProfileAssociationResult replaceIamInstanceProfileAssoc @Override public ResetInstanceAttributeResult resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateKeyPairResult createKeyPair(CreateKeyPairRequest createKeyPairRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateLaunchTemplateResult createLaunchTemplate(CreateLaunchTemplateRequest createLaunchTemplateRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateLaunchTemplateVersionResult createLaunchTemplateVersion( + CreateLaunchTemplateVersionRequest createLaunchTemplateVersionRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1955,38 +2482,50 @@ public CreateNatGatewayResult createNatGateway(CreateNatGatewayRequest createNat @Override public ReplaceNetworkAclEntryResult replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSnapshotsResult describeSnapshots(DescribeSnapshotsRequest describeSnapshotsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkAclResult createNetworkAcl(CreateNetworkAclRequest createNetworkAclRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RegisterImageResult registerImage(RegisterImageRequest registerImageRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RejectTransitGatewayVpcAttachmentResult rejectTransitGatewayVpcAttachment( + RejectTransitGatewayVpcAttachmentRequest rejectTransitGatewayVpcAttachmentRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RejectVpcEndpointConnectionsResult rejectVpcEndpointConnections( + RejectVpcEndpointConnectionsRequest rejectVpcEndpointConnectionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ResetNetworkInterfaceAttributeResult resetNetworkInterfaceAttribute( ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public EnableVpcClassicLinkResult enableVpcClassicLink(EnableVpcClassicLinkRequest enableVpcClassicLinkRequest) - throws AmazonServiceException, AmazonClientException { + throws 
AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1996,122 +2535,160 @@ public EnableVpcClassicLinkDnsSupportResult enableVpcClassicLinkDnsSupport( throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ExportClientVpnClientCertificateRevocationListResult exportClientVpnClientCertificateRevocationList( + ExportClientVpnClientCertificateRevocationListRequest exportClientVpnClientCertificateRevocationListRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ExportClientVpnClientConfigurationResult exportClientVpnClientConfiguration( + ExportClientVpnClientConfigurationRequest exportClientVpnClientConfigurationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ExportTransitGatewayRoutesResult exportTransitGatewayRoutes( + ExportTransitGatewayRoutesRequest exportTransitGatewayRoutesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public CreateVpnConnectionRouteResult createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcEndpointsResult describeVpcEndpoints(DescribeVpcEndpointsRequest describeVpcEndpointsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachClassicLinkVpcResult detachClassicLinkVpc(DetachClassicLinkVpcRequest detachClassicLinkVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeReservedInstancesResult describeReservedInstances() throws AmazonServiceException, AmazonClientException { + public DescribeReservedInstancesResult describeReservedInstances() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeAvailabilityZonesResult describeAvailabilityZones() throws AmazonServiceException, AmazonClientException { + public DescribeAvailabilityZonesResult describeAvailabilityZones() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotPriceHistoryResult describeSpotPriceHistory() throws AmazonServiceException, AmazonClientException { + public DescribeSpotPriceHistoryResult describeSpotPriceHistory() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeNetworkInterfacesResult describeNetworkInterfaces() throws AmazonServiceException, AmazonClientException { + public DescribeNetworkInterfacesResult describeNetworkInterfaces() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeRegionsResult describeRegions() throws AmazonServiceException, AmazonClientException { + public DescribeRegionsResult describeRegions() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeInternetGatewaysResult describeInternetGateways() throws AmazonServiceException, AmazonClientException { + public DescribeInternetGatewaysResult 
describeInternetGateways() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSecurityGroupsResult describeSecurityGroups() throws AmazonServiceException, AmazonClientException { + public DescribeSecurityGroupsResult describeSecurityGroups() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { + public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeAccountAttributesResult describeAccountAttributes() throws AmazonServiceException, AmazonClientException { + public DescribeAccountAttributesResult describeAccountAttributes() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVolumeStatusResult describeVolumeStatus() throws AmazonServiceException, AmazonClientException { + public DescribeVolumeStatusResult describeVolumeStatus() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeImportSnapshotTasksResult describeImportSnapshotTasks() throws AmazonServiceException, AmazonClientException { + public DescribeImportSnapshotTasksResult describeImportSnapshotTasks() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpnConnectionsResult describeVpnConnections() throws AmazonServiceException, AmazonClientException { + public DescribeVpnConnectionsResult describeVpnConnections() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcsResult describeVpcs() throws AmazonServiceException, AmazonClientException { + public DescribeVpcsResult describeVpcs() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection() throws AmazonServiceException, AmazonClientException { + public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeExportTasksResult describeExportTasks() throws AmazonServiceException, AmazonClientException { + public AdvertiseByoipCidrResult advertiseByoipCidr(AdvertiseByoipCidrRequest advertiseByoipCidrRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateVpcPeeringConnectionResult createVpcPeeringConnection() throws AmazonServiceException, AmazonClientException { + public DescribeExportTasksResult describeExportTasks() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CancelImportTaskResult cancelImportTask() throws AmazonServiceException, AmazonClientException { + public DescribeFleetHistoryResult describeFleetHistory(DescribeFleetHistoryRequest describeFleetHistoryRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVolumesResult describeVolumes() throws AmazonServiceException, AmazonClientException { + public DescribeFleetInstancesResult 
describeFleetInstances(DescribeFleetInstancesRequest describeFleetInstancesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeFleetsResult describeFleets(DescribeFleetsRequest describeFleetsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateVpcPeeringConnectionResult createVpcPeeringConnection() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CancelImportTaskResult cancelImportTask() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVolumesResult describeVolumes() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesListingsResult describeReservedInstancesListings() - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeRouteTablesResult describeRouteTables() throws AmazonServiceException, AmazonClientException { + public DescribeRouteTablesResult describeRouteTables() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2128,48 +2705,69 @@ public DescribeScheduledInstancesResult describeScheduledInstances( } @Override - public DescribeDhcpOptionsResult describeDhcpOptions() throws AmazonServiceException, AmazonClientException { + public DescribeDhcpOptionsResult describeDhcpOptions() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribePrefixListsResult describePrefixLists() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribePrefixListsResult describePrefixLists() throws AmazonServiceException, AmazonClientException { + public DescribePrincipalIdFormatResult describePrincipalIdFormat(DescribePrincipalIdFormatRequest describePrincipalIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeImportImageTasksResult describeImportImageTasks() throws AmazonServiceException, AmazonClientException { + public DescribePublicIpv4PoolsResult describePublicIpv4Pools(DescribePublicIpv4PoolsRequest describePublicIpv4PoolsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeNetworkAclsResult describeNetworkAcls() throws AmazonServiceException, AmazonClientException { + public DescribeImportImageTasksResult describeImportImageTasks() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeBundleTasksResult describeBundleTasks() throws AmazonServiceException, AmazonClientException { + public DescribeNetworkAclsResult describeNetworkAcls() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeBundleTasksResult describeBundleTasks() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeByoipCidrsResult describeByoipCidrs(DescribeByoipCidrsRequest describeByoipCidrsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public 
DescribeCapacityReservationsResult describeCapacityReservations( + DescribeCapacityReservationsRequest describeCapacityReservationsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException { + public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateInternetGatewayResult createInternetGateway() throws AmazonServiceException, AmazonClientException { + public CreateInternetGatewayResult createInternetGateway() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeMovingAddressesResult describeMovingAddresses() throws AmazonServiceException, AmazonClientException { + public DescribeMovingAddressesResult describeMovingAddresses() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2179,17 +2777,23 @@ public DescribeNatGatewaysResult describeNatGateways(DescribeNatGatewaysRequest } @Override - public DescribeConversionTasksResult describeConversionTasks() throws AmazonServiceException, AmazonClientException { + public DescribeConversionTasksResult describeConversionTasks() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ImportImageResult importImage() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportImageResult importImage() throws AmazonServiceException, AmazonClientException { + public DescribeFlowLogsResult describeFlowLogs() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeFlowLogsResult describeFlowLogs() throws AmazonServiceException, AmazonClientException { + public DescribeFpgaImageAttributeResult describeFpgaImageAttribute( + DescribeFpgaImageAttributeRequest describeFpgaImageAttributeRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2206,7 +2810,7 @@ public DescribeHostsResult describeHosts() { @Override public DescribeIamInstanceProfileAssociationsResult describeIamInstanceProfileAssociations( DescribeIamInstanceProfileAssociationsRequest describeIamInstanceProfileAssociationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2221,42 +2825,42 @@ public DescribeIdFormatResult describeIdFormat() { } @Override - public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections() throws AmazonServiceException, AmazonClientException { + public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribePlacementGroupsResult describePlacementGroups() throws AmazonServiceException, AmazonClientException { + public DescribePlacementGroupsResult describePlacementGroups() 
throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSubnetsResult describeSubnets() throws AmazonServiceException, AmazonClientException { + public DescribeSubnetsResult describeSubnets() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeInstancesResult describeInstances() throws AmazonServiceException, AmazonClientException { + public DescribeInstancesResult describeInstances() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeImagesResult describeImages() throws AmazonServiceException, AmazonClientException { + public DescribeImagesResult describeImages() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests() throws AmazonServiceException, AmazonClientException { + public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeCustomerGatewaysResult describeCustomerGateways() throws AmazonServiceException, AmazonClientException { + public DescribeCustomerGatewaysResult describeCustomerGateways() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcClassicLinkResult describeVpcClassicLink() throws AmazonServiceException, AmazonClientException { + public DescribeVpcClassicLinkResult describeVpcClassicLink() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2267,69 +2871,162 @@ public DescribeVpcClassicLinkDnsSupportResult describeVpcClassicLinkDnsSupport( } @Override - public DescribeClassicLinkInstancesResult describeClassicLinkInstances() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointConnectionNotificationsResult describeVpcEndpointConnectionNotifications( + DescribeVpcEndpointConnectionNotificationsRequest describeVpcEndpointConnectionNotificationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVpcEndpointConnectionsResult describeVpcEndpointConnections( + DescribeVpcEndpointConnectionsRequest describeVpcEndpointConnectionsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVpcEndpointServiceConfigurationsResult describeVpcEndpointServiceConfigurations( + DescribeVpcEndpointServiceConfigurationsRequest describeVpcEndpointServiceConfigurationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVpcEndpointServicePermissionsResult describeVpcEndpointServicePermissions( + DescribeVpcEndpointServicePermissionsRequest describeVpcEndpointServicePermissionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeTagsResult describeTags() throws AmazonServiceException, AmazonClientException { + public DescribeClassicLinkInstancesResult describeClassicLinkInstances() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportSnapshotResult importSnapshot() throws AmazonServiceException, AmazonClientException { + public 
DescribeClientVpnAuthorizationRulesResult describeClientVpnAuthorizationRules( + DescribeClientVpnAuthorizationRulesRequest describeClientVpnAuthorizationRulesRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotFleetRequestsResult describeSpotFleetRequests() throws AmazonServiceException, AmazonClientException { + public DescribeClientVpnConnectionsResult describeClientVpnConnections( + DescribeClientVpnConnectionsRequest describeClientVpnConnectionsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnEndpointsResult describeClientVpnEndpoints( + DescribeClientVpnEndpointsRequest describeClientVpnEndpointsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnRoutesResult describeClientVpnRoutes( + DescribeClientVpnRoutesRequest describeClientVpnRoutesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnTargetNetworksResult describeClientVpnTargetNetworks( + DescribeClientVpnTargetNetworksRequest describeClientVpnTargetNetworksRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTagsResult describeTags() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewayAttachmentsResult describeTransitGatewayAttachments( + DescribeTransitGatewayAttachmentsRequest describeTransitGatewayAttachmentsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewayRouteTablesResult describeTransitGatewayRouteTables( + DescribeTransitGatewayRouteTablesRequest describeTransitGatewayRouteTablesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewayVpcAttachmentsResult describeTransitGatewayVpcAttachments( + DescribeTransitGatewayVpcAttachmentsRequest describeTransitGatewayVpcAttachmentsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewaysResult describeTransitGateways(DescribeTransitGatewaysRequest describeTransitGatewaysRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ImportSnapshotResult importSnapshot() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeSpotFleetRequestsResult describeSpotFleetRequests() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesModificationsResult describeReservedInstancesModifications() - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeAddressesResult describeAddresses() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeAggregateIdFormatResult describeAggregateIdFormat(DescribeAggregateIdFormatRequest 
describeAggregateIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { + public DescribeKeyPairsResult describeKeyPairs() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeAddressesResult describeAddresses() throws AmazonServiceException, AmazonClientException { + public DescribeLaunchTemplateVersionsResult describeLaunchTemplateVersions( + DescribeLaunchTemplateVersionsRequest describeLaunchTemplateVersionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeKeyPairsResult describeKeyPairs() throws AmazonServiceException, AmazonClientException { + public DescribeLaunchTemplatesResult describeLaunchTemplates(DescribeLaunchTemplatesRequest describeLaunchTemplatesRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeInstanceStatusResult describeInstanceStatus() throws AmazonServiceException, AmazonClientException { + public DescribeInstanceStatusResult describeInstanceStatus() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpnGatewaysResult describeVpnGateways() throws AmazonServiceException, AmazonClientException { + public DescribeVpnGatewaysResult describeVpnGateways() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings() - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcEndpointServicesResult describeVpcEndpointServices() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointServicesResult describeVpcEndpointServices() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public AllocateAddressResult allocateAddress() throws AmazonServiceException, AmazonClientException { + public AllocateAddressResult allocateAddress() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2339,18 +3036,24 @@ public AllocateHostsResult allocateHosts(AllocateHostsRequest allocateHostsReque } @Override - public DescribeSnapshotsResult describeSnapshots() throws AmazonServiceException, AmazonClientException { + public ApplySecurityGroupsToClientVpnTargetNetworkResult applySecurityGroupsToClientVpnTargetNetwork( + ApplySecurityGroupsToClientVpnTargetNetworkRequest applySecurityGroupsToClientVpnTargetNetworkRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeSnapshotsResult describeSnapshots() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcEndpointsResult describeVpcEndpoints() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointsResult describeVpcEndpoints() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DryRunResult dryRun(DryRunSupportedRequest request) - throws AmazonServiceException, AmazonClientException { + throws 
AmazonClientException {
         throw new UnsupportedOperationException("Not supported in mock");
     }
 
@@ -2370,7 +3073,7 @@ public ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest reques
 
     @Override
     public ModifySpotFleetRequestResult modifySpotFleetRequest(ModifySpotFleetRequestRequest modifySpotFleetRequestRequest)
-        throws AmazonServiceException, AmazonClientException {
+        throws AmazonClientException {
         throw new UnsupportedOperationException("Not supported in mock");
     }
 }

From 37b6173e209b7a54ed97ec76e08cc0a2fc44adde Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Wed, 8 May 2019 09:27:06 +0200
Subject: [PATCH 006/321] Allow reindexing into write alias (#41677)

Fixes an issue where reindex currently fails if the destination is an
alias pointing to multiple indices, even if it is using a write index.

Closes #41667
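For context, a minimal sketch of the alias layout this change enables
(hypothetical, editor-added code, not part of this patch; the class and the
index/alias names are made up, and it reuses the same metadata builders that
the test below uses):

    import org.elasticsearch.Version;
    import org.elasticsearch.cluster.metadata.AliasMetaData;
    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.common.settings.Settings;

    // Two indices behind one alias, with exactly one marked as the write index.
    // Reindex into "alias" can now resolve to "idx-2" via concreteWriteIndex(),
    // where resolving via concreteIndexNames(...) used to fail for an alias
    // spanning multiple indices.
    class WriteAliasSketch {
        static IndexMetaData index(String name, boolean writeIndex) {
            return IndexMetaData.builder(name)
                .settings(Settings.builder()
                    .put("index.version.created", Version.CURRENT.id)
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 1))
                .putAlias(AliasMetaData.builder("alias").writeIndex(writeIndex).build())
                .build();
        }

        public static void main(String[] args) {
            IndexMetaData nonWriteIndex = index("idx-1", false); // reads only
            IndexMetaData writeIndex = index("idx-2", true);     // reindex destination
        }
    }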
"source", "source2", "source_multi"); } - public void testTargetIsAlias() { + public void testTargetIsAliasToMultipleIndicesWithoutWriteAlias() { Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_multi", "foo")); - assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [[")); - // The index names can come in either order - assertThat(e.getMessage(), containsString("target")); - assertThat(e.getMessage(), containsString("target2")); + assertThat(e.getMessage(), containsString("no write index is defined for alias [target_multi]. The write index may be explicitly " + + "disabled using is_write_index=false or the alias points to multiple indices without one being designated as a " + + "write index")); + } + + public void testTargetIsAliasWithWriteIndexDisabled() { + Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_alias_with_write_index_disabled", "foo")); + assertThat(e.getMessage(), containsString("no write index is defined for alias [target_alias_with_write_index_disabled]. " + + "The write index may be explicitly disabled using is_write_index=false or the alias points to multiple indices without one " + + "being designated as a write index")); + succeeds("qux", "foo"); // writing directly into the index of which this is the alias works though + } + + public void testTargetIsWriteAlias() { + succeeds("target_multi_with_write_index", "foo"); + succeeds("target_multi_with_write_index", "target2_without_write_index"); + fails("target_multi_with_write_index", "target_multi_with_write_index"); + fails("target_multi_with_write_index", "target_with_write_index"); } public void testRemoteInfoSkipsValidation() { @@ -97,7 +115,7 @@ public void testRemoteInfoSkipsValidation() { private void fails(String target, String... sources) { Exception e = expectThrows(ActionRequestValidationException.class, () -> succeeds(target, sources)); - assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from [target]")); + assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from")); } private void succeeds(String target, String... sources) { @@ -110,12 +128,16 @@ private void succeeds(RemoteInfo remoteInfo, String target, String... sources) { } private static IndexMetaData index(String name, String... aliases) { + return index(name, null, aliases); + } + + private static IndexMetaData index(String name, @Nullable Boolean writeIndex, String... 
aliases) { IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(Settings.builder() .put("index.version.created", Version.CURRENT.id) .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1)); for (String alias: aliases) { - builder.putAlias(AliasMetaData.builder(alias).build()); + builder.putAlias(AliasMetaData.builder(alias).writeIndex(writeIndex).build()); } return builder.build(); } From 2de64487818543252757a64d1e0bd9e968e00b0a Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 8 May 2019 09:28:27 +0200 Subject: [PATCH 007/321] Highlight the use of single-node discovery in docker docs (#41241) Relates to https://discuss.elastic.co/t/es-7-and-docker-single-node-cluster/176585 --- docs/reference/setup/install/docker.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 172c7c1f17c2f..1fcc261d68e1f 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -62,6 +62,9 @@ ifeval::["{release-state}"!="unreleased"] docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" {docker-image} -------------------------------------------- +Note the use of <> that allows bypassing +the <> in a single-node development cluster. + endif::[] [[docker-cli-run-prod-mode]] From 4e24ef1b66988e64eb93ca4b1183ca670da890e5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 8 May 2019 01:08:03 -0700 Subject: [PATCH 008/321] Fix edge case of empty map immutability (#41925) This commit alters the immutability check of the new MapsTests to account for an empty map randomly created. Previously if an empty map was passed in, the test would fail due to randomFrom not having anything to choose from in the keyset. 
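To make the failure mode concrete, here is a minimal, self-contained
sketch of the problem (hypothetical class and helper names, not the
actual test code; the real fix to MapsTests.java follows in the diff
below). Assertions keyed on a non-present key hold even for an empty
immutable map, but picking a random present key requires the map to be
non-empty:

    import java.util.List;
    import java.util.Map;
    import java.util.Random;

    public class EmptyMapGuardSketch {
        // Stand-in for randomFrom(map.keySet()): throws on an empty map, which
        // is why the immutability check must return early for empty maps.
        static String randomPresentKey(Map<String, String> map, Random random) {
            List<String> keys = List.copyOf(map.keySet());
            if (keys.isEmpty()) {
                throw new IllegalArgumentException("cannot pick from an empty key set");
            }
            return keys.get(random.nextInt(keys.size()));
        }

        public static void main(String[] args) {
            Map<String, String> empty = Map.of();
            try {
                empty.put("absent", "value"); // non-present-key checks are safe on empty maps
            } catch (UnsupportedOperationException expected) {
                System.out.println("put rejected, as expected");
            }
            // randomPresentKey(empty, new Random()) would throw here, so the test
            // now performs the non-present-key assertions first and returns early
            // when the randomly created map is empty.
        }
    }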
--- .../elasticsearch/common/util/MapsTests.java | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/util/MapsTests.java b/server/src/test/java/org/elasticsearch/common/util/MapsTests.java index a8338c7e4c38d..6edffee268ecc 100644 --- a/server/src/test/java/org/elasticsearch/common/util/MapsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/MapsTests.java @@ -114,9 +114,22 @@ private void assertMapEntries(final Map map, final Collection map) { + final String nonPresentKey = randomValueOtherThanMany(map.keySet()::contains, () -> randomAlphaOfLength(16)); + assertUnsupported("putting a map entry should be unsupported", () -> map.put(nonPresentKey, randomAlphaOfLength(16))); + assertUnsupported("putting map entries should be unsupported", () -> map.putAll(Map.of(nonPresentKey, randomAlphaOfLength(16)))); + assertUnsupported( + "computing a new map association should be unsupported", + () -> map.compute(nonPresentKey, (k, v) -> randomAlphaOfLength(16))); + assertUnsupported( + "computing a new map association should be unsupported", + () -> map.computeIfAbsent(nonPresentKey, k -> randomAlphaOfLength(16))); + assertUnsupported("map merge should be unsupported", () -> map.merge(nonPresentKey, randomAlphaOfLength(16), (k, v) -> v)); + if (map.isEmpty()) { + return; + } + final String presentKey = randomFrom(map.keySet()); final String valueNotAssociatedWithPresentKey = randomValueOtherThan(map.get(presentKey), () -> randomAlphaOfLength(16)); - final String nonPresentKey = randomValueOtherThanMany(map.keySet()::contains, () -> randomAlphaOfLength(16)); assertUnsupported("clearing map should be unsupported", map::clear); assertUnsupported("replacing a map entry should be unsupported", () -> map.replace(presentKey, valueNotAssociatedWithPresentKey)); assertUnsupported( @@ -125,24 +138,15 @@ private void assertMapImmutability(final Map map) { assertUnsupported("replacing map entries should be unsupported", () -> map.replaceAll((k, v) -> v + v)); assertUnsupported("removing a map entry should be unsupported", () -> map.remove(presentKey)); assertUnsupported("removing a map entry should be unsupported", () -> map.remove(presentKey, map.get(presentKey))); - assertUnsupported("putting a map entry should be unsupported", () -> map.put(nonPresentKey, randomAlphaOfLength(16))); - assertUnsupported("putting map entries should be unsupported", () -> map.putAll(Map.of(nonPresentKey, randomAlphaOfLength(16)))); assertUnsupported( "computing a new map association should be unsupported", () -> map.compute(presentKey, (k, v) -> randomBoolean() ? null : v + v)); - assertUnsupported( - "computing a new map association should be unsupported", - () -> map.compute(nonPresentKey, (k, v) -> randomAlphaOfLength(16))); - assertUnsupported( - "computing a new map association should be unsupported", - () -> map.computeIfAbsent(nonPresentKey, k -> randomAlphaOfLength(16))); assertUnsupported( "computing a new map association should be unsupported", () -> map.computeIfPresent(presentKey, (k, v) -> randomBoolean() ? null : v + v)); assertUnsupported( "map merge should be unsupported", () -> map.merge(presentKey, map.get(presentKey), (k, v) -> randomBoolean() ? 
null : v + v)); - assertUnsupported("map merge should be unsupported", () -> map.merge(nonPresentKey, randomAlphaOfLength(16), (k, v) -> v)); } private void assertUnsupported(final String message, final ThrowingRunnable runnable) { From 42050e95250f372d3a2f35be633b65c082efa29e Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 8 May 2019 11:00:59 +0200 Subject: [PATCH 009/321] Cut over SearchPhaseResult to Writeable (#41853) Relates to #34389 --- .../search/SearchPhaseResult.java | 22 ++++++-- .../elasticsearch/search/SearchService.java | 9 +-- .../search/dfs/DfsSearchResult.java | 55 ++++++------------- .../search/fetch/FetchSearchResult.java | 17 +----- .../search/fetch/QueryFetchSearchResult.java | 27 ++------- .../fetch/ScrollQueryFetchSearchResult.java | 20 ++----- .../search/query/QuerySearchResult.java | 17 +----- .../search/query/ScrollQuerySearchResult.java | 20 ++----- .../action/search/SearchAsyncActionTests.java | 6 -- .../search/query/QuerySearchResultTests.java | 2 +- 10 files changed, 60 insertions(+), 135 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java b/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java index f242cbd8eb901..39e6adede885b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java +++ b/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java @@ -19,11 +19,13 @@ package org.elasticsearch.search; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.TransportResponse; +import java.io.IOException; + /** * This class is a base class for all search related results. It contains the shard target it * was executed against, a shard index used to reference the result on the coordinating node @@ -32,15 +34,22 @@ * across search phases to ensure the same point in time snapshot is used for querying and * fetching etc. 
*/ -public abstract class SearchPhaseResult extends TransportResponse implements Streamable { +public abstract class SearchPhaseResult extends TransportResponse { private SearchShardTarget searchShardTarget; private int shardIndex = -1; protected long requestId; + protected SearchPhaseResult() { + + } + + protected SearchPhaseResult(StreamInput in) throws IOException { + super(in); + } + /** - * Returns the results request ID that is used to reference the search context on the executing - * node + * Returns the results request ID that is used to reference the search context on the executing node */ public long getRequestId() { return requestId; @@ -79,4 +88,9 @@ public QuerySearchResult queryResult() { * Returns the fetch result iff it's included in this response otherwise null */ public FetchSearchResult fetchResult() { return null; } + + @Override + public final void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 0b22f5d660655..8cf3138212f7f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1093,9 +1093,10 @@ public InternalAggregation.ReduceContext createReduceContext(boolean finalReduce } public static final class CanMatchResponse extends SearchPhaseResult { - private boolean canMatch; + private final boolean canMatch; public CanMatchResponse(StreamInput in) throws IOException { + super(in); this.canMatch = in.readBoolean(); } @@ -1103,12 +1104,6 @@ public CanMatchResponse(boolean canMatch) { this.canMatch = canMatch; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - canMatch = in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index dbe566a4ddbb2..1d61cd565d69c 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -42,11 +42,22 @@ public class DfsSearchResult extends SearchPhaseResult { private ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); private int maxDoc; - public DfsSearchResult() { - } - public DfsSearchResult(StreamInput in) throws IOException { - readFrom(in); + super(in); + requestId = in.readLong(); + int termsSize = in.readVInt(); + if (termsSize == 0) { + terms = EMPTY_TERMS; + } else { + terms = new Term[termsSize]; + for (int i = 0; i < terms.length; i++) { + terms[i] = new Term(in.readString(), in.readBytesRef()); + } + } + this.termStatistics = readTermStats(in, terms); + fieldStatistics = readFieldStats(in); + + maxDoc = in.readVInt(); } public DfsSearchResult(long id, SearchShardTarget shardTarget) { @@ -86,26 +97,6 @@ public ObjectObjectHashMap fieldStatistics() { return fieldStatistics; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestId = in.readLong(); - int termsSize = in.readVInt(); - if (termsSize == 0) { - terms = EMPTY_TERMS; - } else { - terms = new Term[termsSize]; - for (int i = 0; i < terms.length; i++) { - terms[i] = new Term(in.readString(), in.readBytesRef()); - } - } - 
this.termStatistics = readTermStats(in, terms); - readFieldStats(in, fieldStatistics); - - - maxDoc = in.readVInt(); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -154,16 +145,9 @@ public static void writeSingleTermStats(StreamOutput out, TermStatistics termSt } } - public static ObjectObjectHashMap readFieldStats(StreamInput in) throws IOException { - return readFieldStats(in, null); - } - - public static ObjectObjectHashMap readFieldStats(StreamInput in, - ObjectObjectHashMap fieldStatistics) throws IOException { + static ObjectObjectHashMap readFieldStats(StreamInput in) throws IOException { final int numFieldStatistics = in.readVInt(); - if (fieldStatistics == null) { - fieldStatistics = HppcMaps.newNoNullKeysMap(numFieldStatistics); - } + ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(numFieldStatistics); for (int i = 0; i < numFieldStatistics; i++) { final String field = in.readString(); assert field != null; @@ -178,7 +162,7 @@ public static ObjectObjectHashMap readFieldStats(S return fieldStatistics; } - public static TermStatistics[] readTermStats(StreamInput in, Term[] terms) throws IOException { + static TermStatistics[] readTermStats(StreamInput in, Term[] terms) throws IOException { int termsStatsSize = in.readVInt(); final TermStatistics[] termStatistics; if (termsStatsSize == 0) { @@ -200,7 +184,6 @@ public static TermStatistics[] readTermStats(StreamInput in, Term[] terms) throw return termStatistics; } - /* * optional statistics are set to -1 in lucene by default. * Since we are using var longs to encode values we add one to each value @@ -211,7 +194,6 @@ public static long addOne(long value) { return value + 1; } - /* * See #addOne this just subtracting one and asserts that the actual value * is positive. 
@@ -220,5 +202,4 @@ public static long subOne(long value) { assert value >= 0; return value - 1; } - } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index 400ab3623c0e8..6e183f1148384 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -39,7 +39,9 @@ public FetchSearchResult() { } public FetchSearchResult(StreamInput in) throws IOException { - readFrom(in); + super(in); + requestId = in.readLong(); + hits = new SearchHits(in); } public FetchSearchResult(long id, SearchShardTarget shardTarget) { @@ -82,19 +84,6 @@ public int counterGetAndIncrement() { return counter++; } - public static FetchSearchResult readFetchSearchResult(StreamInput in) throws IOException { - FetchSearchResult result = new FetchSearchResult(); - result.readFrom(in); - return result; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - requestId = in.readLong(); - hits = new SearchHits(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java index 0a5a7cec375db..e87bd6d5e9d63 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java @@ -27,19 +27,15 @@ import java.io.IOException; -import static org.elasticsearch.search.fetch.FetchSearchResult.readFetchSearchResult; -import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; - public final class QueryFetchSearchResult extends SearchPhaseResult { - private QuerySearchResult queryResult; - private FetchSearchResult fetchResult; - - public QueryFetchSearchResult() { - } + private final QuerySearchResult queryResult; + private final FetchSearchResult fetchResult; public QueryFetchSearchResult(StreamInput in) throws IOException { - readFrom(in); + super(in); + queryResult = new QuerySearchResult(in); + fetchResult = new FetchSearchResult(in); } public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { @@ -81,19 +77,6 @@ public FetchSearchResult fetchResult() { return fetchResult; } - public static QueryFetchSearchResult readQueryFetchSearchResult(StreamInput in) throws IOException { - QueryFetchSearchResult result = new QueryFetchSearchResult(); - result.readFrom(in); - return result; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - queryResult = readQuerySearchResult(in); - fetchResult = readFetchSearchResult(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index 6b0a8b619bff3..0785fafdc2189 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -27,17 +27,15 @@ import java.io.IOException; -import static 
org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; - public final class ScrollQueryFetchSearchResult extends SearchPhaseResult { - private QueryFetchSearchResult result; - - public ScrollQueryFetchSearchResult() { - } + private final QueryFetchSearchResult result; public ScrollQueryFetchSearchResult(StreamInput in) throws IOException { - readFrom(in); + super(in); + SearchShardTarget searchShardTarget = new SearchShardTarget(in); + result = new QueryFetchSearchResult(in); + setSearchShardTarget(searchShardTarget); } public ScrollQueryFetchSearchResult(QueryFetchSearchResult result, SearchShardTarget shardTarget) { @@ -71,14 +69,6 @@ public FetchSearchResult fetchResult() { return result.fetchResult(); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - SearchShardTarget searchShardTarget = new SearchShardTarget(in); - result = readQueryFetchSearchResult(in); - setSearchShardTarget(searchShardTarget); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 16236c64291cc..cde180fd7ef4d 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -67,7 +67,9 @@ public QuerySearchResult() { } public QuerySearchResult(StreamInput in) throws IOException { - readFrom(in); + super(in); + long id = in.readLong(); + readFromWithId(id, in); } public QuerySearchResult(long id, SearchShardTarget shardTarget) { @@ -256,19 +258,6 @@ public boolean hasSearchContext() { return hasScoreDocs || hasSuggestHits(); } - public static QuerySearchResult readQuerySearchResult(StreamInput in) throws IOException { - QuerySearchResult result = new QuerySearchResult(); - result.readFrom(in); - return result; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - long id = in.readLong(); - readFromWithId(id, in); - } - public void readFromWithId(long id, StreamInput in) throws IOException { this.requestId = id; from = in.readVInt(); diff --git a/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index 632d148ea901b..fe8bad3d098e4 100644 --- a/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -26,17 +26,15 @@ import java.io.IOException; -import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; - public final class ScrollQuerySearchResult extends SearchPhaseResult { - private QuerySearchResult result; - - public ScrollQuerySearchResult() { - } + private final QuerySearchResult result; public ScrollQuerySearchResult(StreamInput in) throws IOException { - readFrom(in); + super(in); + SearchShardTarget shardTarget = new SearchShardTarget(in); + result = new QuerySearchResult(in); + setSearchShardTarget(shardTarget); } public ScrollQuerySearchResult(QuerySearchResult result, SearchShardTarget shardTarget) { @@ -61,14 +59,6 @@ public QuerySearchResult queryResult() { return result; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - SearchShardTarget shardTarget = new 
SearchShardTarget(in); - result = readQuerySearchResult(in); - setSearchShardTarget(shardTarget); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 21ac0cdf636d2..23abefea15fc7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -418,11 +417,6 @@ public TestSearchPhaseResult(long id, DiscoveryNode node) { this.node = node; } - @Override - public void readFrom(StreamInput in) throws IOException { - - } - @Override public void writeTo(StreamOutput out) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java index 64712b3e417a0..efbe858e1899a 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -74,7 +74,7 @@ private static QuerySearchResult createTestInstance() throws Exception { public void testSerialization() throws Exception { QuerySearchResult querySearchResult = createTestInstance(); Version version = VersionUtils.randomVersion(random()); - QuerySearchResult deserialized = copyStreamable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, version); + QuerySearchResult deserialized = copyWriteable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, version); assertEquals(querySearchResult.getRequestId(), deserialized.getRequestId()); assertNull(deserialized.getSearchShardTarget()); assertEquals(querySearchResult.topDocs().maxScore, deserialized.topDocs().maxScore, 0f); From 1e762a137ead9709861245720e4bc055a61e7bc3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 8 May 2019 10:23:55 +0100 Subject: [PATCH 010/321] Node names in bootstrap config have no ports (#41569) In cases where node names and transport addresses can be muddled, it is unclear that `cluster.initial_master_nodes: master-a:9300` means to look for a node called `master-a:9300` rather than a node called `master-a` with transport port `9300`. This commit adds docs to that effect. --- .../modules/discovery/bootstrapping.asciidoc | 29 +++++++++++++------ .../discovery-settings.asciidoc | 27 ++++++++--------- 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc index 0ba7d4b17cef7..2b17af17ec5da 100644 --- a/docs/reference/modules/discovery/bootstrapping.asciidoc +++ b/docs/reference/modules/discovery/bootstrapping.asciidoc @@ -10,11 +10,22 @@ data folder and freshly-started nodes that are joining an existing cluster obtain this information from the cluster's elected master. The initial set of master-eligible nodes is defined in the -<>. 
This is a list -of the <> or IP addresses of the master-eligible nodes in -the new cluster. If you do not configure `node.name` then it is set to the -node's hostname, so in this case you can use hostnames in -`cluster.initial_master_nodes` too. +<>. This should be +set to a list containing one of the following items for each master-eligible +node: + +- The <> of the node. +- The node's hostname if `node.name` is not set, because `node.name` defaults + to the node's hostname. You must use either the fully-qualified hostname or + the bare hostname <>. +- The IP address of the node's <>, if it is + not possible to use the `node.name` of the node. This is normally the IP + address to which <> resolves but + <>. +- The IP address and port of the node's publish address, in the form `IP:PORT`, + if it is not possible to use the `node.name` of the node and there are + multiple nodes sharing a single IP address. When you start a master-eligible node, you can provide this setting on the command line or in the `elasticsearch.yml` file. After the cluster has formed, @@ -47,9 +58,9 @@ cluster.initial_master_nodes: - master-c -------------------------------------------------- -You can use a mix of IP addresses and node names too. If there is more than one -Elasticsearch node with the same IP address then the transport port must also -be given to specify exactly which node is meant: +If it is not possible to use the names of the nodes then you can also use IP +addresses, or IP addresses and ports, or even a mix of IP addresses and node +names: [source,yaml] -------------------------------------------------- @@ -57,7 +68,7 @@ cluster.initial_master_nodes: - 10.0.10.101 - 10.0.10.102:9300 - 10.0.10.102:9301 - - master-node-hostname + - master-node-name -------------------------------------------------- Like all node settings, it is also possible to specify the initial set of master diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc index 5709ae3bb9345..245852b209609 100644 --- a/docs/reference/setup/important-settings/discovery-settings.asciidoc +++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc @@ -49,26 +49,23 @@ discovery.seed_hosts: - 192.168.1.10:9300 - 192.168.1.11 <1> - seeds.mydomain.com <2> -cluster.initial_master_nodes: - - master-node-a <3> - - 192.168.1.12 <4> - - 192.168.1.13:9301 <5> +cluster.initial_master_nodes: <3> + - master-node-a + - master-node-b + - master-node-c -------------------------------------------------- <1> The port will default to `transport.profiles.default.port` and fallback to `transport.port` if not specified. <2> If a hostname resolves to multiple IP addresses then the node will attempt to discover other nodes at all resolved addresses. -<3> Initial master nodes can be identified by their <>, - which defaults to the hostname. Make sure that the value in - `cluster.initial_master_nodes` matches the `node.name` exactly. If you use - a fully-qualified domain name such as `master-node-a.example.com` for your - node names then you must use the fully-qualified name in this list; - conversely if `node.name` is a bare hostname without any trailing - qualifiers then you must also omit the trailing qualifiers in - `cluster.initial_master_nodes`. -<4> Initial master nodes can also be identified by their IP address. -<5> If multiple master nodes share an IP address then the transport port must - be used to distinguish between them. 
+<3> The initial master nodes should be identified by their
+    <>, which defaults to their hostname. Make sure that
+    the value in `cluster.initial_master_nodes` matches the `node.name`
+    exactly. If you use a fully-qualified domain name such as
+    `master-node-a.example.com` for your node names then you must use the
+    fully-qualified name in this list; conversely if `node.name` is a bare
+    hostname without any trailing qualifiers then you must also omit the
+    trailing qualifiers in `cluster.initial_master_nodes`.
 
 For more information, see <> and <>.

From f15a1cdbc538bb49a158c25191626174730d324a Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Wed, 8 May 2019 12:25:58 +0200
Subject: [PATCH 011/321] Always set terminated_early if terminate_after is
 set in the search request (#40839)

* terminated_early should always be set in the response with terminate_after

Today we set `terminated_early` to true in the response if the query
terminated early due to `terminate_after`. However if `terminate_after`
is smaller than the number of documents in a shard we don't set the flag
in the response indicating that the query was exhaustive. This change
fixes this discrepancy by setting terminated_early to false in the
response if the number of documents that match the query is smaller
than the provided `terminate_after` value.

Closes #33949
---
 .../elasticsearch/search/query/QueryPhase.java |  4 ++++
 .../search/query/QueryPhaseTests.java          | 15 +++++++++++++--
 .../search/simple/SimpleSearchIT.java          |  2 +-
 3 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index bc4aa05d30537..64621277f6e6f 100644
--- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -281,6 +281,10 @@ static boolean execute(SearchContext searchContext,
         } finally {
             searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION);
         }
+        if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER
+                && queryResult.terminatedEarly() == null) {
+            queryResult.terminatedEarly(false);
+        }
 
         final QuerySearchResult result = searchContext.queryResult();
         for (QueryCollectorContext ctx : collectors) {
diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
index a5061c35d7fcf..582e1caa7ce87 100644
--- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java
@@ -353,11 +353,23 @@ public void testTerminateAfterEarlyTermination() throws Exception {
         TestSearchContext context = new TestSearchContext(null, indexShard);
         context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
         context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
-        context.terminateAfter(1);
 
         final IndexReader reader = DirectoryReader.open(dir);
         IndexSearcher contextSearcher = new IndexSearcher(reader);
 
+        context.terminateAfter(numDocs);
+        {
+            context.setSize(10);
+            TotalHitCountCollector collector = new TotalHitCountCollector();
+            context.queryCollectors().put(TotalHitCountCollector.class, collector);
+            QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
+            assertFalse(context.queryResult().terminatedEarly());
+            assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+            assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10));
+            assertThat(collector.getTotalHits(), equalTo(numDocs));
+        }
+
+        context.terminateAfter(1);
         {
             context.setSize(1);
             QueryPhase.execute(context, contextSearcher, checkCancelled -> {});
@@ -419,7 +431,6 @@ public void testTerminateAfterEarlyTermination() throws Exception {
             assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
             assertThat(collector.getTotalHits(), equalTo(1));
         }
-
         reader.close();
         dir.close();
     }
diff --git a/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java
index 3e546d1720281..74b0408636f9b 100644
--- a/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java
@@ -250,7 +250,7 @@ public void testSimpleTerminateAfterCount() throws Exception {
             .setTerminateAfter(2 * max).get();
 
         assertHitCount(searchResponse, max);
-        assertNull(searchResponse.isTerminatedEarly());
+        assertFalse(searchResponse.isTerminatedEarly());
     }
 
     public void testSimpleIndexSortEarlyTerminate() throws Exception {

From fa98c5ec60f4ea38785f1b598d3bcddb5c6085be Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Wed, 8 May 2019 14:00:11 +0300
Subject: [PATCH 012/321] Testclusters: support for security and convert
 example plugins (#41864)

Testclusters now detect from settings that security is enabled. If a
user is not specified using the DSL introduced in this PR, a default one
is created. The appropriate wait conditions are used, authenticating
with the first user defined in the DSL (or the default user). An example
of the DSL to create a user is:

  user username: "test_user" password: "x-pack-test-password" role: "superuser"

All keys are optional and default to the values shown in this example.
---
 .../gradle/test/RestIntegTestTask.groovy      |  2 +-
 .../gradle/http/WaitForHttpResource.java      |  3 +-
 .../testclusters/ElasticsearchCluster.java    | 43 ++++++++-------
 .../testclusters/ElasticsearchNode.java       | 54 ++++++++++++++++++-
 .../TestClusterConfiguration.java             | 11 ++--
 distribution/archives/build.gradle            | 20 ++++---
 plugins/examples/custom-settings/build.gradle |  7 +--
 .../examples/custom-suggester/build.gradle    |  6 +--
 .../examples/painless-whitelist/build.gradle  |  5 +-
 plugins/examples/rescore/build.gradle         |  1 +
 plugins/examples/rest-handler/build.gradle    |  9 ++--
 .../script-expert-scoring/build.gradle        |  1 +
 .../build.gradle                              | 26 +++------
 .../build.gradle                              |  1 +
 14 files changed, 118 insertions(+), 71 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index eec46f9a522a7..ef784b6f901d1 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -70,7 +70,7 @@ class RestIntegTestTask extends DefaultTask {
         project.testClusters {
             "$name" {
                 distribution = 'INTEG_TEST'
-                version = project.version
+                version = VersionProperties.elasticsearch
                 javaHome = project.file(project.ext.runtimeJavaHome)
             }
         }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java
index a8680ef13dda0..3ac4a53910c26 100644
---
a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java @@ -123,8 +123,7 @@ public boolean wait(int durationInMs) throws GeneralSecurityException, Interrupt if (System.nanoTime() < waitUntil) { Thread.sleep(sleep); } else { - logger.error("Failed to access url [{}]", url, failure); - return false; + throw failure; } } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 35b3bbd7481b6..0cb7ee0c10fc7 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -22,23 +22,22 @@ import org.elasticsearch.gradle.Distribution; import org.elasticsearch.gradle.FileSupplier; import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.http.WaitForHttpResource; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; -import java.io.BufferedReader; import java.io.File; import java.io.IOException; -import java.io.InputStreamReader; import java.io.UncheckedIOException; -import java.net.HttpURLConnection; import java.net.URI; -import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.security.GeneralSecurityException; import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -75,6 +74,8 @@ public ElasticsearchCluster(String path, String clusterName, Project project, Fi services, artifactsExtractDir, workingDirBase ) ); + + addWaitForClusterHealth(); } public void setNumberOfNodes(int numberOfNodes) { @@ -219,6 +220,11 @@ public void extraConfigFile(String destination, File from) { nodes.all(node -> node.extraConfigFile(destination, from)); } + @Override + public void user(Map userSpec) { + nodes.all(node -> node.user(userSpec)); + } + private void writeUnicastHostsFiles() { String unicastUris = nodes.stream().flatMap(node -> node.getAllTransportPortURI().stream()).collect(Collectors.joining("\n")); nodes.forEach(node -> { @@ -262,9 +268,6 @@ public void waitForAllConditions() { writeUnicastHostsFiles(); LOGGER.info("Starting to wait for cluster to form"); - addWaitForUri( - "cluster health yellow", "/_cluster/health?wait_for_nodes=>=" + nodes.size() + "&wait_for_status=yellow" - ); waitForConditions(waitConditions, startedAt, CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this); } @@ -293,21 +296,25 @@ public ElasticsearchNode singleNode() { return getFirstNode(); } - private void addWaitForUri(String description, String uri) { - waitConditions.put(description, (node) -> { + private void addWaitForClusterHealth() { + waitConditions.put("cluster health yellow", (node) -> { try { - URL url = new URL("http://" + getFirstNode().getHttpSocketURI() + uri); - HttpURLConnection con = (HttpURLConnection) url.openConnection(); - con.setRequestMethod("GET"); - con.setConnectTimeout(500); - con.setReadTimeout(500); - try (BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream()))) { - String response = reader.lines().collect(Collectors.joining("\n")); - LOGGER.info("{} -> {} ->\n{}", this, uri, response); + 
WaitForHttpResource wait = new WaitForHttpResource( + "http", getFirstNode().getHttpSocketURI(), nodes.size() + ); + List> credentials = getFirstNode().getCredentials(); + if (getFirstNode().getCredentials().isEmpty() == false) { + wait.setUsername(credentials.get(0).get("useradd")); + wait.setPassword(credentials.get(0).get("-p")); } - return true; + return wait.wait(500); } catch (IOException e) { throw new IllegalStateException("Connection attempt to " + this + " failed", e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new TestClustersException("Interrupted while waiting for " + this, e); + } catch (GeneralSecurityException e) { + throw new RuntimeException("security exception", e); } }); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 596e205480cb0..3bb1fb2ddb6e3 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -38,6 +38,7 @@ import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -86,6 +87,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Map> environment = new LinkedHashMap<>(); private final Map extraConfigFiles = new HashMap<>(); final LinkedHashMap defaultConfig = new LinkedHashMap<>(); + private final List> credentials = new ArrayList<>(); private final Path confPathRepo; private final Path configFile; @@ -117,8 +119,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { esStdoutFile = confPathLogs.resolve("es.stdout.log"); esStderrFile = confPathLogs.resolve("es.stderr.log"); tmpDir = workingDir.resolve("tmp"); - waitConditions.put("http ports file", node -> Files.exists(((ElasticsearchNode) node).httpPortsFile)); - waitConditions.put("transport ports file", node -> Files.exists(((ElasticsearchNode)node).transportPortFile)); + waitConditions.put("ports files", this::checkPortsFilesExistWithDelay); } public String getName() { @@ -319,9 +320,25 @@ public synchronized void start() { copyExtraConfigFiles(); + if (isSettingMissingOrTrue("xpack.security.enabled")) { + if (credentials.isEmpty()) { + user(Collections.emptyMap()); + } + credentials.forEach(paramMap -> runElaticsearchBinScript( + "elasticsearch-users", + paramMap.entrySet().stream() + .flatMap(entry -> Stream.of(entry.getKey(), entry.getValue())) + .toArray(String[]::new) + )); + } + startElasticsearchProcess(); } + private boolean isSettingMissingOrTrue(String name) { + return Boolean.valueOf(settings.getOrDefault(name, () -> "false").get().toString()); + } + private void copyExtraConfigFiles() { extraConfigFiles.forEach((destination, from) -> { if (Files.exists(from.toPath()) == false) { @@ -375,6 +392,22 @@ public void extraConfigFile(String destination, File from) { extraConfigFiles.put(destination, from); } + @Override + public void user(Map userSpec) { + Set keys = new HashSet<>(userSpec.keySet()); + keys.remove("username"); + keys.remove("password"); + keys.remove("role"); + if (keys.isEmpty() == false) { + throw new TestClustersException("Unknown keys in user definition " + keys + " for " + this); + } + Map cred = new LinkedHashMap<>(); + cred.put("useradd", 
userSpec.getOrDefault("username","test_user")); + cred.put("-p", userSpec.getOrDefault("password","x-pack-test-password")); + cred.put("-r", userSpec.getOrDefault("role", "superuser")); + credentials.add(cred); + } + private void runElaticsearchBinScriptWithInput(String input, String tool, String... args) { try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) { services.loggedExec(spec -> { @@ -752,4 +785,21 @@ public int hashCode() { public String toString() { return "node{" + path + ":" + name + "}"; } + + List> getCredentials() { + return credentials; + } + + private boolean checkPortsFilesExistWithDelay(TestClusterConfiguration node) { + if (Files.exists(httpPortsFile) && Files.exists(transportPortFile)) { + return true; + } + try { + Thread.sleep(500); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new TestClustersException("Interrupted while waiting for ports files", e); + } + return Files.exists(httpPortsFile) && Files.exists(transportPortFile); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index 39f9683ff4863..628dadcbb9d37 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -27,6 +27,7 @@ import java.net.URI; import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import java.util.function.Supplier; @@ -72,6 +73,8 @@ public interface TestClusterConfiguration { void extraConfigFile(String destination, File from); + void user(Map userSpec); + String getHttpSocketURI(); String getTransportPortURI(); @@ -108,7 +111,7 @@ default void waitForConditions( break; } } catch (TestClustersException e) { - throw new TestClustersException(e); + throw e; } catch (Exception e) { if (lastException == null) { lastException = e; @@ -116,12 +119,6 @@ default void waitForConditions( lastException = e; } } - try { - Thread.sleep(500); - } - catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } if (conditionMet == false) { String message = "`" + context + "` failed to wait for " + description + " after " + diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index c7c58ad4ebdc5..ac5056716f9d9 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -295,6 +295,10 @@ subprojects { } } +subprojects { + group = "org.elasticsearch.distribution.${name.startsWith("oss-") ? 
"oss" : "default"}" +} + /***************************************************************************** * Rest test config * *****************************************************************************/ @@ -302,6 +306,8 @@ configure(subprojects.findAll { it.name == 'integ-test-zip' }) { apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' + group = "org.elasticsearch.distribution.integ-test-zip" + integTest { includePackaged = true } @@ -321,23 +327,14 @@ configure(subprojects.findAll { it.name == 'integ-test-zip' }) { inputs.properties(project(':distribution').restTestExpansions) MavenFilteringHack.filter(it, project(':distribution').restTestExpansions) } -} -/***************************************************************************** - * Maven config * - *****************************************************************************/ -configure(subprojects.findAll { it.name.contains('zip') }) { - // only zip distributions go to maven + + // The integ-test-distribution is published to maven BuildPlugin.configurePomGeneration(project) apply plugin: 'nebula.info-scm' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' - // note: the group must be correct before applying the nexus plugin, or - // it will capture the wrong value... - String subgroup = project.name == 'integ-test-zip' ? 'integ-test-zip' : 'zip' - project.group = "org.elasticsearch.distribution.${subgroup}" - // make the pom file name use elasticsearch instead of the project name archivesBaseName = "elasticsearch${it.name.contains('oss') ? '-oss' : ''}" @@ -378,3 +375,4 @@ configure(subprojects.findAll { it.name.contains('zip') }) { } } } + diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index 3caf29c8513b5..b750018fefe87 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -26,7 +27,7 @@ esplugin { noticeFile rootProject.file('NOTICE.txt') } -integTestCluster { +testClusters.integTest { // Adds a setting in the Elasticsearch keystore before running the integration tests - keystoreSetting 'custom.secured', 'password' -} \ No newline at end of file + keystore 'custom.secured', 'password' +} diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index 977e467391d8b..a6861c8be6370 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ - +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -27,8 +27,8 @@ esplugin { noticeFile rootProject.file('NOTICE.txt') } -integTestCluster { - numNodes = 2 +testClusters.integTest { + numberOfNodes = 2 } // this plugin has no unit tests, only rest tests diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 95928c472ca0d..738a3be86afab 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -31,8 +32,8 @@ dependencies { compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}" } -if (System.getProperty('tests.distribution') == null) { - integTestCluster.distribution = 'oss' +testClusters.integTest { + distribution = 'oss' } test.enabled = false diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index cdecd760c81e8..e18805bc5477c 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index 98dd093ac17a3..14a6189f9ad4e 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -36,11 +37,11 @@ task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { args 'org.elasticsearch.example.resthandler.ExampleFixture', baseDir, 'TEST' } -integTestCluster { +integTest { dependsOn exampleFixture -} -integTestRunner { - nonInputProperties.systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" + runner { + nonInputProperties.systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" + } } testingConventions.naming { diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index e9da62acdcff4..6f88baccefc3d 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index f869e4872ddc3..fba9580525bcc 100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -1,3 +1,4 @@ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -14,15 +15,14 @@ dependencies { testCompile "org.elasticsearch.client:x-pack-transport:${versions.elasticsearch}" } - -integTestRunner { +integTest { + dependsOn buildZip + runner { systemProperty 'tests.security.manager', 'false' + } } -integTestCluster { - dependsOn buildZip - distribution = 'default' - +testClusters.integTest { setting 'xpack.security.enabled', 'true' setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' @@ -34,17 +34,7 @@ integTestCluster { // processors are being used that are in ingest-common module. 
distribution = 'default'
 
-  setupCommand 'setupDummyUser',
-          'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'custom_superuser'
-  waitCondition = { node, ant ->
-    File tmpFile = new File(node.cwd, 'wait.success')
-    ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
-            dest: tmpFile.toString(),
-            username: 'test_user',
-            password: 'x-pack-test-password',
-            ignoreerrors: true,
-            retries: 10)
-    return tmpFile.exists()
-  }
+  user role: 'custom_superuser'
 }
+
 check.dependsOn integTest
diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle
index c0801a38b570c..a99fa2d543861 100644
--- a/x-pack/qa/security-setup-password-tests/build.gradle
+++ b/x-pack/qa/security-setup-password-tests/build.gradle
@@ -10,6 +10,7 @@ dependencies {
 
 integTestRunner {
   systemProperty 'tests.security.manager', 'false'
+  // TODO add tests.config.dir = {cluster.singleNode().getConfigDir()} when converting to testclusters
 }
 
 integTestCluster {

From 0dd6b985c1dbc6372ba82cc2fb00a7273618e531 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Wed, 8 May 2019 12:08:47 +0100
Subject: [PATCH 013/321] Remove mention of bulk threadpool in examples
 (#41935)

The `bulk` threadpool is now called `write`, but `bulk` is still used in
some examples. This commit fixes that. Also, the only way
`thread_pool.bulk.size: 30` is a valid increase in the size of this
threadpool is if you have 29 processors, which is an odd number of
processors to have. This commit removes the "more threads" bit.
---
 docs/reference/modules/threadpool.asciidoc | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc
index d0f68e37730b8..3bea925f972e5 100644
--- a/docs/reference/modules/threadpool.asciidoc
+++ b/docs/reference/modules/threadpool.asciidoc
@@ -52,13 +52,14 @@ There are several thread pools, but the important ones include:
     Mainly for java client executing of action when listener threaded is set to true.
     Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`.
 
-Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `bulk`
-thread pool to have more threads:
+Changing a specific thread pool can be done by setting its type-specific
+parameters; for example, changing the number of threads in the `write` thread
+pool:
 
 [source,yaml]
--------------------------------------------------
 thread_pool:
-    bulk:
+    write:
         size: 30
--------------------------------------------------
 
@@ -87,7 +88,7 @@ full, it will abort the request.
 [source,yaml]
--------------------------------------------------
 thread_pool:
-    bulk:
+    write:
         size: 30
         queue_size: 1000
--------------------------------------------------

From f6df9286bd340a1fb08970a4ce573be9da41e43a Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Wed, 8 May 2019 12:23:49 +0100
Subject: [PATCH 014/321] [ML Data Frame] Set executing nodes in task actions
 (#41798)

Direct the task request to the node executing the task and also refactor
the task responses so all errors are returned and set the HTTP status
code based on presence of errors.
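The node-routing part of this change boils down to the idea sketched
below: collect the executor node of every matching persistent task and
scope the tasks request to exactly those nodes. This is a rough,
self-contained illustration in which a plain map stands in for the
persistent-tasks metadata (all names here are hypothetical; the real
helper, DataFrameNodes.dataFrameTaskNodes, appears in the diff below):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class TaskNodeRoutingSketch {
        // taskToNode stands in for the persistent-task metadata:
        // data frame transform id -> id of the node currently executing it.
        static String[] executorNodes(List<String> dataFrameIds, Map<String, String> taskToNode) {
            Set<String> nodes = new HashSet<>();
            for (String id : dataFrameIds) {
                String node = taskToNode.get(id);
                if (node != null) { // ids without a running task contribute no node
                    nodes.add(node);
                }
            }
            return nodes.toArray(new String[0]);
        }

        public static void main(String[] args) {
            Map<String, String> assignments = Map.of("df-1", "node-a", "df-2", "node-b");
            String[] nodes = executorNodes(List.of("df-1", "df-2", "df-3"), assignments);
            // A tasks request would then be scoped with request.setNodes(nodes)
            // before dispatch, mirroring the call to
            // DataFrameNodes.dataFrameTaskNodes(ids, clusterService.state()) below.
            System.out.println(String.join(",", nodes)); // e.g. node-a,node-b (order may vary)
        }
    }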
--- .../action/StopDataFrameTransformAction.java | 10 ++++ .../dataframe/action/DataFrameNodes.java | 50 +++++++++++++++++++ ...portGetDataFrameTransformsStatsAction.java | 1 + ...TransportStopDataFrameTransformAction.java | 49 ++---------------- .../BaseTasksResponseToXContentListener.java | 29 +++++++++++ .../RestDeleteDataFrameTransformAction.java | 12 +---- ...RestGetDataFrameTransformsStatsAction.java | 4 +- .../RestStartDataFrameTransformAction.java | 4 +- .../RestStopDataFrameTransformAction.java | 4 +- ...ionTests.java => DataFrameNodesTests.java} | 11 ++-- 10 files changed, 106 insertions(+), 68 deletions(-) create mode 100644 x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java create mode 100644 x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java rename x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/{TransportStopDataFrameTransformActionTests.java => DataFrameNodesTests.java} (85%) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java index e7a43f252d637..99699c3a48cb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java @@ -5,8 +5,10 @@ */ package org.elasticsearch.xpack.core.dataframe.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.common.Nullable; @@ -24,6 +26,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -167,6 +170,13 @@ public Response(boolean stopped) { this.stopped = stopped; } + public Response(List taskFailures, + List nodeFailures, + boolean stopped) { + super(taskFailures, nodeFailures); + this.stopped = stopped; + } + public boolean isStopped() { return stopped; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java new file mode 100644 index 0000000000000..1b2c54b331f42 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; + +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public final class DataFrameNodes { + + private DataFrameNodes() { + } + + /** + * Get the list of nodes the data frames are executing on + * + * @param dataFrameIds The data frames. + * @param clusterState State + * @return The executor nodes + */ + public static String[] dataFrameTaskNodes(List dataFrameIds, ClusterState clusterState) { + + Set executorNodes = new HashSet<>(); + + PersistentTasksCustomMetaData tasksMetaData = + PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterState); + + if (tasksMetaData != null) { + Set dataFrameIdsSet = new HashSet<>(dataFrameIds); + + Collection> tasks = + tasksMetaData.findTasks(DataFrameField.TASK_NAME, t -> dataFrameIdsSet.contains(t.getId())); + + for (PersistentTasksCustomMetaData.PersistentTask task : tasks) { + executorNodes.add(task.getExecutorNode()); + } + } + + return executorNodes.toArray(new String[0]); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index 7ec5beb1131d4..7ab5f28001407 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -124,6 +124,7 @@ protected void doExecute(Task task, Request request, ActionListener fi dataFrameTransformsConfigManager.expandTransformIds(request.getId(), request.getPageParams(), ActionListener.wrap( ids -> { request.setExpandedIds(ids); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(ids, clusterService.state())); super.doExecute(task, request, ActionListener.wrap( response -> collectStatsForTransformsWithoutTasks(request, response, finalListener), finalListener::onFailure diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 2092493caaf4c..120f1ef77596b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -13,29 +13,24 @@ import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import 
org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; -import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.elasticsearch.ExceptionsHelper.convertToElastic; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; public class TransportStopDataFrameTransformAction extends @@ -63,7 +58,7 @@ protected void doExecute(Task task, StopDataFrameTransformAction.Request request dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( expandedIds -> { request.setExpandedIds(new HashSet<>(expandedIds)); - request.setNodes(dataframeNodes(expandedIds, clusterService.state())); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); super.doExecute(task, request, listener); }, listener::onFailure @@ -136,48 +131,12 @@ protected StopDataFrameTransformAction.Response newResponse(StopDataFrameTransfo List tasks, List taskOperationFailures, List failedNodeExceptions) { - if (taskOperationFailures.isEmpty() == false) { - throw convertToElastic(taskOperationFailures.get(0).getCause()); - } else if (failedNodeExceptions.isEmpty() == false) { - throw convertToElastic(failedNodeExceptions.get(0)); - } - - // Either the transform doesn't exist (the user didn't create it yet) or was deleted - // after the Stop API executed. 
- // In either case, let the user know - if (tasks.size() == 0) { - if (taskOperationFailures.isEmpty() == false) { - throw convertToElastic(taskOperationFailures.get(0).getCause()); - } else if (failedNodeExceptions.isEmpty() == false) { - throw convertToElastic(failedNodeExceptions.get(0)); - } else { - // This can happen we the actual task in the node no longer exists, or was never started - return new StopDataFrameTransformAction.Response(true); - } + if (taskOperationFailures.isEmpty() == false || failedNodeExceptions.isEmpty() == false) { + return new StopDataFrameTransformAction.Response(taskOperationFailures, failedNodeExceptions, false); } + // if tasks is empty allMatch is 'vacuously satisfied' boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped); return new StopDataFrameTransformAction.Response(allStopped); } - - static String[] dataframeNodes(List dataFrameIds, ClusterState clusterState) { - - Set executorNodes = new HashSet<>(); - - PersistentTasksCustomMetaData tasksMetaData = - PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterState); - - if (tasksMetaData != null) { - Set dataFrameIdsSet = new HashSet<>(dataFrameIds); - - Collection> tasks = - tasksMetaData.findTasks(DataFrameField.TASK_NAME, t -> dataFrameIdsSet.contains(t.getId())); - - for (PersistentTasksCustomMetaData.PersistentTask task : tasks) { - executorNodes.add(task.getExecutorNode()); - } - } - - return executorNodes.toArray(new String[0]); - } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java new file mode 100644 index 0000000000000..def26a52efb87 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.rest.action; + +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; + + +class BaseTasksResponseToXContentListener extends RestToXContentListener { + + BaseTasksResponseToXContentListener(RestChannel channel) { + super(channel); + } + + @Override + protected RestStatus getStatus(T response) { + if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) { + return RestStatus.INTERNAL_SERVER_ERROR; + } + return RestStatus.OK; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java index 0efa3ffa2c5fd..183952e060338 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java @@ -11,8 +11,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; @@ -35,15 +33,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id); return channel -> client.execute(DeleteDataFrameTransformAction.INSTANCE, request, - new RestToXContentListener(channel) { - @Override - protected RestStatus getStatus(DeleteDataFrameTransformAction.Response response) { - if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) { - return RestStatus.INTERNAL_SERVER_ERROR; - } - return RestStatus.OK; - } - }); + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java index 87cc13edbc329..f2d14f8106958 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; @@ -33,7 +32,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), 
PageParams.DEFAULT_FROM), restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE))); } - return channel -> client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java index 1d9b3f29a6133..764aeca4a6480 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; @@ -31,7 +30,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); StartDataFrameTransformAction.Request request = new StartDataFrameTransformAction.Request(id, force); request.timeout(restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(), AcknowledgedRequest.DEFAULT_ACK_TIMEOUT)); - return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java index e93898b905ba1..d34478b9ba941 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; @@ -34,7 +33,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient StopDataFrameTransformAction.Request request = new StopDataFrameTransformAction.Request(id, waitForCompletion, force, timeout); - return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git 
a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java similarity index 85% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java rename to x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java index ddc7ddd4f1b9c..ba549aa7e8bb4 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java @@ -18,13 +18,12 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import java.io.IOException; import java.util.Arrays; import java.util.Collections; import static org.hamcrest.Matchers.hasItemInArray; -public class TransportStopDataFrameTransformActionTests extends ESTestCase { +public class DataFrameNodesTests extends ESTestCase { public void testDataframeNodes() { String dataFrameIdFoo = "df-id-foo"; @@ -49,12 +48,12 @@ public Version getMinimalSupportedVersion() { } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(StreamOutput out) { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) { return null; } }, @@ -64,7 +63,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); - String[] nodes = TransportStopDataFrameTransformAction.dataframeNodes(Arrays.asList(dataFrameIdFoo, dataFrameIdBar), cs); + String[] nodes = DataFrameNodes.dataFrameTaskNodes(Arrays.asList(dataFrameIdFoo, dataFrameIdBar), cs); assertEquals(2, nodes.length); assertThat(nodes, hasItemInArray("node-1")); assertThat(nodes, hasItemInArray("node-2")); @@ -72,7 +71,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void testDataframeNodes_NoTasks() { ClusterState emptyState = ClusterState.builder(new ClusterName("_name")).build(); - String[] nodes = TransportStopDataFrameTransformAction.dataframeNodes(Collections.singletonList("df-id"), emptyState); + String[] nodes = DataFrameNodes.dataFrameTaskNodes(Collections.singletonList("df-id"), emptyState); assertEquals(0, nodes.length); } } From 9df539988a722cbcf1038331a9cef06d9cb28c07 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 8 May 2019 07:43:10 -0400 Subject: [PATCH 015/321] Fix master version in docs The docs are versioned as 8.0.0-alpha1, yet we are not currently publishing 8.0.0-alpha1 snapshots, but rather 8.0.0 snapshots. We will only later qualify designated builds as 8.0.0-alpha1, at which point the docs version can be updated to reflect that. This commit updates the docs versions to 8.0.0.
Closes #41941 --- docs/Versions.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index f728d76751dc3..224be0a0cee30 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,4 +1,4 @@ -:version: 8.0.0-alpha1 +:version: 8.0.0 //// bare_version never includes -alpha or -beta //// From c9e8beb3182f1c989b3ce2177fde5fa28f80f5ea Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 8 May 2019 13:57:01 +0200 Subject: [PATCH 016/321] Allow IDEA test runner to control number of test iterations (#41653) Allows configuring the number of test iterations via IntelliJ's config dialog, instead of having to add it manually via the tests.iters system property. --- .../org/elasticsearch/bootstrap/test-framework.policy | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index d6b15f3df43dc..b195662dcf20e 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -89,6 +89,11 @@ grant codeBase "${codebase.httpasyncclient}" { permission java.net.NetPermission "getProxySelector"; }; +grant codeBase "${codebase.junit-rt.jar}" { + // allows IntelliJ IDEA JUnit test runner to control number of test iterations + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; +}; + grant codeBase "file:${gradle.dist.lib}/-" { // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production // dependency and there's no point in exercising the security policy against it @@ -104,4 +109,4 @@ grant codeBase "file:${gradle.worker.jar}" { grant { // since the gradle test worker jar is on the test classpath, our tests should be able to read it permission java.io.FilePermission "${gradle.worker.jar}", "read"; -}; \ No newline at end of file +}; From cfc12b4bead632fcefeb6b78e090fb9f9373f8d5 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 8 May 2019 14:20:36 +0200 Subject: [PATCH 017/321] Cut over MultiSearchResponse to Writeable (#41844) Relates to #34389 --- .../mustache/MultiSearchTemplateAction.java | 8 +- .../mustache/MultiSearchTemplateResponse.java | 63 +++++++------- .../action/search/MultiSearchAction.java | 8 +- .../action/search/MultiSearchResponse.java | 83 ++++++++----------- 4 files changed, 77 insertions(+), 85 deletions(-) diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java index 372b328bbfc1a..a9a44d0471586 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class MultiSearchTemplateAction extends Action { @@ -32,6 +33,11 @@ private MultiSearchTemplateAction() { @Override public MultiSearchTemplateResponse newResponse() { - return new MultiSearchTemplateResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public 
Writeable.Reader getResponseReader() { + return MultiSearchTemplateResponse::new; } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 70e36ed85a496..dd8cdc04457ad 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -43,11 +43,19 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera /** * A search template response item, holding the actual search template response, or an error message if it failed. */ - public static class Item implements Streamable { - private SearchTemplateResponse response; - private Exception exception; + public static class Item implements Writeable { + private final SearchTemplateResponse response; + private final Exception exception; - Item() { + private Item(StreamInput in) throws IOException { + if (in.readBoolean()) { + this.response = new SearchTemplateResponse(); + response.readFrom(in); + this.exception = null; + } else { + exception = in.readException(); + this.response = null; + } } public Item(SearchTemplateResponse response, Exception exception) { @@ -78,22 +86,6 @@ public SearchTemplateResponse getResponse() { return this.response; } - public static Item readItem(StreamInput in) throws IOException { - Item item = new Item(); - item.readFrom(in); - return item; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - if (in.readBoolean()) { - this.response = new SearchTemplateResponse(); - response.readFrom(in); - } else { - exception = in.readException(); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { if (response != null) { @@ -113,17 +105,25 @@ public Exception getFailure() { public String toString() { return "Item [response=" + response + ", exception=" + exception + "]"; } - - } - private Item[] items; - private long tookInMillis; + private final Item[] items; + private final long tookInMillis; - MultiSearchTemplateResponse() { + MultiSearchTemplateResponse(StreamInput in) throws IOException { + super(in); + items = new Item[in.readVInt()]; + for (int i = 0; i < items.length; i++) { + items[i] = new Item(in); + } + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + tookInMillis = in.readVLong(); + } else { + tookInMillis = -1L; + } } - public MultiSearchTemplateResponse(Item[] items, long tookInMillis) { + MultiSearchTemplateResponse(Item[] items, long tookInMillis) { this.items = items; this.tookInMillis = tookInMillis; } @@ -149,14 +149,7 @@ public TimeValue getTook() { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - items = new Item[in.readVInt()]; - for (int i = 0; i < items.length; i++) { - items[i] = Item.readItem(in); - } - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { - tookInMillis = in.readVLong(); - } + throw new 
UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java index 298c7593f6c97..9017a7b94ecb4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class MultiSearchAction extends Action { @@ -32,6 +33,11 @@ private MultiSearchAction() { @Override public MultiSearchResponse newResponse() { - return new MultiSearchResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return MultiSearchResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 3ea90073f2b96..a924105bff6a2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -59,19 +59,37 @@ public class MultiSearchResponse extends ActionResponse implements Iterable Date: Wed, 8 May 2019 13:46:58 +0100 Subject: [PATCH 018/321] Make ISO8601 date parser accept timezone when time does not have seconds (#41896) Prior to this change the ISO8601 date parser would only parse an optional timezone if seconds were specified. This change moves the timezone to the same level of optional components as hour, so that timestamps without minutes or seconds may optionally contain a timezone. It also adds a unit test to cover all the supported formats. --- .../common/time/DateFormatters.java | 6 +-- .../joda/JavaJodaTimeDuellingTests.java | 7 +++ .../common/time/DateFormattersTests.java | 52 +++++++++++++++++++ 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index b17787800858f..330681e2624a2 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -178,7 +178,7 @@ public class DateFormatters { /** * Returns a ISO 8601 compatible date time formatter and parser. 
* This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the - * existing joda time ISO data formater + * existing joda time ISO date formatter */ private static final DateFormatter ISO_8601 = new JavaDateFormatter("iso8601", STRICT_DATE_OPTIONAL_TIME_PRINTER, new DateTimeFormatterBuilder() @@ -201,6 +201,8 @@ public class DateFormatters { .appendFraction(NANO_OF_SECOND, 1, 9, false) .optionalEnd() .optionalEnd() + .optionalEnd() + .optionalEnd() .optionalStart() .appendZoneOrOffsetId() .optionalEnd() @@ -208,8 +210,6 @@ public class DateFormatters { .append(TIME_ZONE_FORMATTER_NO_COLON) .optionalEnd() .optionalEnd() - .optionalEnd() - .optionalEnd() .toFormatter(Locale.ROOT)); ///////////////////////////////////////// diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index c3a541fe87ec2..061d83c9c3865 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -733,14 +733,21 @@ public void testIso8601Parsers() { JodaDateFormatter jodaFormatter = new JodaDateFormatter(format, isoFormatter, isoFormatter); DateFormatter javaFormatter = DateFormatter.forPattern(format); + assertSameDate("2018-10-10", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10+0430", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11-08:00", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11Z", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12+0100", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12.123", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12.123Z", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12.123+0000", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12,123", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12,123Z", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12,123+05:30", format, jodaFormatter, javaFormatter); } public void testParsingMissingTimezone() { diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 23f08cf8ddfdf..8f2a661664304 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -198,6 +198,58 @@ public void testParsingStrictNanoDates() { formatter.format(formatter.parse("2018-05-15T17:14:56.123456789+01:00")); } + public void testIso8601Parsing() { + DateFormatter formatter = DateFormatters.forPattern("iso8601"); + + // timezone not allowed with just date + formatter.format(formatter.parse("2018-05-15")); + + formatter.format(formatter.parse("2018-05-15T17")); + formatter.format(formatter.parse("2018-05-15T17Z")); + formatter.format(formatter.parse("2018-05-15T17+0100")); + 
formatter.format(formatter.parse("2018-05-15T17+01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14")); + formatter.format(formatter.parse("2018-05-15T17:14Z")); + formatter.format(formatter.parse("2018-05-15T17:14-0100")); + formatter.format(formatter.parse("2018-05-15T17:14-01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14:56")); + formatter.format(formatter.parse("2018-05-15T17:14:56Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56+01:00")); + + // milliseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123+01:00")); + + // microseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-01:00")); + + // nanoseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); + } + public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second From df815e68237b862e6f9d760b47029107e7e524f8 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 8 May 2019 16:31:51 +0300 Subject: [PATCH 019/321] Add corretto 11 to runtime javas (#41946) We now have corretto on the ephemeral CI images, so we can add it to our regular java test matrix. On backporting, we will also add corretto8 to the branches that support it.
--- .ci/matrix-runtime-javas.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index 78db05ba3e9bf..61746ea59e1d0 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -11,3 +11,4 @@ ES_RUNTIME_JAVA: - openjdk12 - zulu11 - zulu12 + - corretto11 From 68c87eb1ae1f15cbc65650c31a41b4caaefc21f9 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 8 May 2019 09:04:57 -0500 Subject: [PATCH 020/321] Muting failing test 240_max_buckets/Max Buckets (#41949) --- .../rest-api-spec/test/search.aggregation/240_max_buckets.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index c85ca9ee676f6..7761eb0f95155 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -75,6 +75,9 @@ setup: --- "Max bucket": + - skip: + version: "all" + reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/41947" - do: cluster.put_settings: body: From c7db902604f4480c87346ac3dee1eb3a2db3297a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 8 May 2019 07:16:13 -0700 Subject: [PATCH 021/321] Add gradle plugin for downloading jdk (#41461) We currently download 3 variants of the same version of the jdk for bundling into the distributions. Additionally, the vagrant images do their own downloading. This commit moves the jdk downloading into a utility gradle plugin. This will be used in a future PR by the packaging tests. The new plugin exposes a "jdks" project extension which allows creating named jdks. Once the jdk version and platform are set for a named jdk, the jdk object may be used as a lazy String for the jdk home path, or as a file collection for copying.
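A minimal usage sketch for a consuming project (the jdk name "bundled", the version shown, and the printJdkHome task are illustrative assumptions, not part of this change; the plugin must already be on the build script classpath, e.g. via buildSrc):

    apply plugin: 'elasticsearch.jdk-download'

    jdks {
        // the name is arbitrary; version must match the jdk version pattern
        // (e.g. 11.0.2+33) and platform must be one of linux, windows, darwin
        bundled {
            version = '11.0.2+33'
            platform = 'linux'
        }
    }

    task printJdkHome {
        // a Jdk is Buildable, so depending on it wires up download and extraction
        dependsOn jdks.bundled
        doLast {
            // used as a String, the Jdk object lazily resolves to the extracted jdk home path
            println "JDK HOME: ${jdks.bundled}"
        }
    }

The integration test below asserts on exactly this kind of "JDK HOME: ..." log line, and the reworked jdkFiles closure in distribution/build.gradle copies the same object as a file collection.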
--- buildSrc/build.gradle | 1 + .../java/org/elasticsearch/gradle/Jdk.java | 112 ++++++++++++ .../gradle/JdkDownloadPlugin.java | 170 ++++++++++++++++++ .../elasticsearch.jdk-download.properties | 1 + .../gradle/JdkDownloadPluginIT.java | 110 ++++++++++++ .../gradle/JdkDownloadPluginTests.java | 78 ++++++++ .../gradle/test/BaseTestCase.java | 21 +++ .../gradle/openjdk-1.0.2_linux-x64_bin.tar.gz | Bin 0 -> 181 bytes .../gradle/openjdk-1.0.2_osx-x64_bin.tar.gz | Bin 0 -> 238 bytes .../gradle/openjdk-1.0.2_windows-x64_bin.zip | Bin 0 -> 490 bytes .../src/testKit/jdk-download/build.gradle | 15 ++ .../testKit/jdk-download/reuse/build.gradle | 9 + .../src/testKit/jdk-download/settings.gradle | 1 + .../testKit/jdk-download/subproj/build.gradle | 41 +++++ distribution/archives/build.gradle | 2 +- distribution/build.gradle | 89 ++------- distribution/packages/build.gradle | 2 +- 17 files changed, 576 insertions(+), 76 deletions(-) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java create mode 100644 buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz create mode 100644 buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz create mode 100644 buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip create mode 100644 buildSrc/src/testKit/jdk-download/build.gradle create mode 100644 buildSrc/src/testKit/jdk-download/reuse/build.gradle create mode 100644 buildSrc/src/testKit/jdk-download/settings.gradle create mode 100644 buildSrc/src/testKit/jdk-download/subproj/build.gradle diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0a8e09188e302..737bbca4cafb9 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -124,6 +124,7 @@ dependencies { compile 'com.avast.gradle:gradle-docker-compose-plugin:0.8.12' testCompile "junit:junit:${props.getProperty('junit')}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" + testCompile 'com.github.tomakehurst:wiremock-jre8-standalone:2.23.2' } /***************************************************************************** diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java new file mode 100644 index 0000000000000..aa26f398e8be9 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle; + +import org.gradle.api.Buildable; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.provider.Property; +import org.gradle.api.tasks.TaskDependency; + +import java.io.File; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.regex.Pattern; + +public class Jdk implements Buildable, Iterable { + + static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+)(@([a-f0-9]{32}))?"); + private static final List ALLOWED_PLATFORMS = Collections.unmodifiableList(Arrays.asList("linux", "windows", "darwin")); + + private final String name; + private final Configuration configuration; + + private final Property version; + private final Property platform; + + + Jdk(String name, Project project) { + this.name = name; + this.configuration = project.getConfigurations().create("jdk_" + name); + this.version = project.getObjects().property(String.class); + this.platform = project.getObjects().property(String.class); + } + + public String getName() { + return name; + } + + public String getVersion() { + return version.get(); + } + + public void setVersion(String version) { + if (VERSION_PATTERN.matcher(version).matches() == false) { + throw new IllegalArgumentException("malformed version [" + version + "] for jdk [" + name + "]"); + } + this.version.set(version); + } + + public String getPlatform() { + return platform.get(); + } + + public void setPlatform(String platform) { + if (ALLOWED_PLATFORMS.contains(platform) == false) { + throw new IllegalArgumentException( + "unknown platform [" + platform + "] for jdk [" + name + "], must be one of " + ALLOWED_PLATFORMS); + } + this.platform.set(platform); + } + + // pkg private, for internal use + Configuration getConfiguration() { + return configuration; + } + + @Override + public String toString() { + return configuration.getSingleFile().toString(); + } + + @Override + public TaskDependency getBuildDependencies() { + return configuration.getBuildDependencies(); + } + + // internal, make this jdks configuration unmodifiable + void finalizeValues() { + if (version.isPresent() == false) { + throw new IllegalArgumentException("version not specified for jdk [" + name + "]"); + } + if (platform.isPresent() == false) { + throw new IllegalArgumentException("platform not specified for jdk [" + name + "]"); + } + version.finalizeValue(); + platform.finalizeValue(); + } + + @Override + public Iterator iterator() { + return configuration.iterator(); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java new file mode 100644 index 0000000000000..a6372dfd231ac --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -0,0 +1,170 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle; + +import org.gradle.api.Action; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.UnknownTaskException; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.ConfigurationContainer; +import org.gradle.api.artifacts.dsl.DependencyHandler; +import org.gradle.api.artifacts.repositories.IvyArtifactRepository; +import org.gradle.api.file.CopySpec; +import org.gradle.api.file.FileTree; +import org.gradle.api.file.RelativePath; +import org.gradle.api.tasks.Copy; +import org.gradle.api.tasks.TaskProvider; + +import java.io.File; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.function.Supplier; +import java.util.regex.Matcher; + +public class JdkDownloadPlugin implements Plugin { + + @Override + public void apply(Project project) { + NamedDomainObjectContainer jdksContainer = project.container(Jdk.class, name -> + new Jdk(name, project) + ); + project.getExtensions().add("jdks", jdksContainer); + + project.afterEvaluate(p -> { + for (Jdk jdk : jdksContainer) { + jdk.finalizeValues(); + String version = jdk.getVersion(); + String platform = jdk.getPlatform(); + + // depend on the jdk directory "artifact" from the root project + DependencyHandler dependencies = project.getDependencies(); + Map depConfig = new HashMap<>(); + depConfig.put("path", ":"); // root project + depConfig.put("configuration", configName("extracted_jdk", version, platform)); + dependencies.add(jdk.getConfiguration().getName(), dependencies.project(depConfig)); + + // ensure a root level jdk download task exists + setupRootJdkDownload(project.getRootProject(), platform, version); + } + }); + } + + private static void setupRootJdkDownload(Project rootProject, String platform, String version) { + String extractTaskName = "extract" + capitalize(platform) + "Jdk" + version; + // NOTE: this is *horrendous*, but seems to be the only way to check for the existence of a registered task + try { + rootProject.getTasks().named(extractTaskName); + // already setup this version + return; + } catch (UnknownTaskException e) { + // fall through: register the task + } + + // decompose the bundled jdk version, broken into elements as: [feature, interim, update, build] + // Note the "patch" version is not yet handled here, as it has not yet been used by java. + Matcher jdkVersionMatcher = Jdk.VERSION_PATTERN.matcher(version); + if (jdkVersionMatcher.matches() == false) { + throw new IllegalArgumentException("Malformed jdk version [" + version + "]"); + } + String jdkVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? 
(jdkVersionMatcher.group(2)) : ""); + String jdkMajor = jdkVersionMatcher.group(1); + String jdkBuild = jdkVersionMatcher.group(3); + String hash = jdkVersionMatcher.group(5); + + // add fake ivy repo for jdk url + String repoName = "jdk_repo_" + version; + if (rootProject.getRepositories().findByName(repoName) == null) { + // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back + rootProject.getRepositories().ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> + layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("jdk")); + }); + // current pattern since 12.0.1 + rootProject.getRepositories().ivy(ivyRepo -> { + ivyRepo.setName(repoName + "_with_hash"); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> layout.artifact( + "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("jdk")); + }); + } + + // add the jdk as a "dependency" + final ConfigurationContainer configurations = rootProject.getConfigurations(); + String remoteConfigName = configName("openjdk", version, platform); + String localConfigName = configName("extracted_jdk", version, platform); + Configuration jdkConfig = configurations.findByName(remoteConfigName); + if (jdkConfig == null) { + jdkConfig = configurations.create(remoteConfigName); + configurations.create(localConfigName); + } + String extension = platform.equals("windows") ? "zip" : "tar.gz"; + String jdkDep = "jdk:" + (platform.equals("darwin") ? "osx" : platform) + ":" + jdkVersion + "@" + extension; + rootProject.getDependencies().add(configName("openjdk", version, platform), jdkDep); + + // add task for extraction + // TODO: look into doing this as an artifact transform, which are cacheable starting in gradle 5.3 + int rootNdx = platform.equals("darwin") ? 
2 : 1; + Action removeRootDir = copy -> { + // remove extra unnecessary directory levels + copy.eachFile(details -> { + String[] pathSegments = details.getRelativePath().getSegments(); + String[] newPathSegments = Arrays.copyOfRange(pathSegments, rootNdx, pathSegments.length); + details.setRelativePath(new RelativePath(true, newPathSegments)); + }); + copy.setIncludeEmptyDirs(false); + }; + // delay resolving jdkConfig until runtime + Supplier jdkArchiveGetter = jdkConfig::getSingleFile; + final Callable fileGetter; + if (extension.equals("zip")) { + fileGetter = () -> rootProject.zipTree(jdkArchiveGetter.get()); + } else { + fileGetter = () -> rootProject.tarTree(rootProject.getResources().gzip(jdkArchiveGetter.get())); + } + String extractDir = rootProject.getBuildDir().toPath().resolve("jdks/openjdk-" + jdkVersion + "_" + platform).toString(); + TaskProvider extractTask = rootProject.getTasks().register(extractTaskName, Copy.class, copyTask -> { + copyTask.doFirst(t -> rootProject.delete(extractDir)); + copyTask.into(extractDir); + copyTask.from(fileGetter, removeRootDir); + }); + rootProject.getArtifacts().add(localConfigName, + rootProject.getLayout().getProjectDirectory().dir(extractDir), + artifact -> artifact.builtBy(extractTask)); + } + + private static String configName(String prefix, String version, String platform) { + return prefix + "_" + version + "_" + platform; + } + + private static String capitalize(String s) { + return s.substring(0, 1).toUpperCase(Locale.ROOT) + s.substring(1); + } +} diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties new file mode 100644 index 0000000000000..7568724a32a0b --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.JdkDownloadPlugin \ No newline at end of file diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java new file mode 100644 index 0000000000000..5f982e1b47d93 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle; + +import com.github.tomakehurst.wiremock.WireMockServer; +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.function.Consumer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.head; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static org.hamcrest.CoreMatchers.equalTo; + +public class JdkDownloadPluginIT extends GradleIntegrationTestCase { + + private static final String FAKE_JDK_VERSION = "1.0.2+99"; + private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)"); + private static final Pattern NUM_CONFIGS_LOGLINE = Pattern.compile("NUM CONFIGS: (.*)"); + + public void testLinuxExtraction() throws IOException { + assertExtraction("getLinuxJdk", "linux", "bin/java"); + } + + public void testDarwinExtraction() throws IOException { + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java"); + } + + public void testWindowsExtraction() throws IOException { + assertExtraction("getWindowsJdk", "windows", "bin/java"); + } + + public void testCrossProjectReuse() throws IOException { + runBuild("numConfigurations", "linux", result -> { + Matcher matcher = NUM_CONFIGS_LOGLINE.matcher(result.getOutput()); + assertTrue("could not find num configs in output: " + result.getOutput(), matcher.find()); + assertThat(Integer.parseInt(matcher.group(1)), equalTo(6)); // 3 import configs, 3 export configs + }); + } + + public void assertExtraction(String taskname, String platform, String javaBin) throws IOException { + runBuild(taskname, platform, result -> { + Matcher matcher = JDK_HOME_LOGLINE.matcher(result.getOutput()); + assertTrue("could not find jdk home in output: " + result.getOutput(), matcher.find()); + String jdkHome = matcher.group(1); + Path javaPath = Paths.get(jdkHome, javaBin); + assertTrue(javaPath.toString(), Files.exists(javaPath)); + }); + } + + private void runBuild(String taskname, String platform, Consumer assertions) throws IOException { + WireMockServer wireMock = new WireMockServer(0); + try { + String extension = platform.equals("windows") ? "zip" : "tar.gz"; + String filename = "openjdk-1.0.2_" + platform + "-x64_bin." 
+ extension; + wireMock.stubFor(head(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename)) + .willReturn(aResponse().withStatus(200))); + final byte[] filebytes; + try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream(filename)) { + filebytes = stream.readAllBytes(); + } + wireMock.stubFor(get(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename)) + .willReturn(aResponse().withStatus(200).withBody(filebytes))); + wireMock.start(); + + GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download")) + .withArguments(taskname, + "-Dlocal.repo.path=" + getLocalTestRepoPath(), + "-Dtests.jdk_version=" + FAKE_JDK_VERSION, + "-Dtests.jdk_repo=" + wireMock.baseUrl()) + .withPluginClasspath(); + + BuildResult result = runner.build(); + assertions.accept(result); + } catch (Exception e) { + // for debugging + System.err.println("missed requests: " + wireMock.findUnmatchedRequests().getRequests()); + throw e; + } finally { + wireMock.stop(); + } + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java new file mode 100644 index 0000000000000..c6ca817e759fa --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle; + +import org.elasticsearch.gradle.test.GradleUnitTestCase; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Project; +import org.gradle.testfixtures.ProjectBuilder; +import org.junit.BeforeClass; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class JdkDownloadPluginTests extends GradleUnitTestCase { + private static Project rootProject; + + @BeforeClass + public static void setupRoot() { + rootProject = ProjectBuilder.builder().build(); + } + + public void testMissingVersion() { + assertJdkError(createProject(), "testjdk", null, "linux", "version not specified for jdk [testjdk]"); + } + + public void testMissingPlatform() { + assertJdkError(createProject(), "testjdk", "11.0.2+33", null, "platform not specified for jdk [testjdk]"); + } + + public void testUnknownPlatform() { + assertJdkError(createProject(), "testjdk", "11.0.2+33", "unknown", + "unknown platform [unknown] for jdk [testjdk], must be one of [linux, windows, darwin]"); + } + + public void testBadVersionFormat() { + assertJdkError(createProject(), "testjdk", "badversion", "linux", "malformed version [badversion] for jdk [testjdk]"); + } + + private void assertJdkError(Project project, String name, String version, String platform, String message) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createJdk(project, name, version, platform)); + assertThat(e.getMessage(), equalTo(message)); + } + + private void createJdk(Project project, String name, String version, String platform) { + @SuppressWarnings("unchecked") + NamedDomainObjectContainer jdks = (NamedDomainObjectContainer) project.getExtensions().getByName("jdks"); + jdks.create(name, jdk -> { + if (version != null) { + jdk.setVersion(version); + } + if (platform != null) { + jdk.setPlatform(platform); + } + }).finalizeValues(); + } + + private Project createProject() { + Project project = ProjectBuilder.builder().withParent(rootProject).build(); + project.getPlugins().apply("elasticsearch.jdk-download"); + return project; + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java index c3262ee1e26e6..0fc26f0284c44 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java @@ -22,6 +22,7 @@ import com.carrotsearch.randomizedtesting.RandomizedRunner; import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; +import junit.framework.AssertionFailedError; import org.junit.Assert; import org.junit.runner.RunWith; @@ -32,4 +33,24 @@ }) @ThreadLeakLingering(linger = 5000) // wait for "Connection worker" to die public abstract class BaseTestCase extends Assert { + + // add expectThrows from junit 5 + @FunctionalInterface + public interface ThrowingRunnable { + void run() throws Throwable; + } + public static T expectThrows(Class expectedType, ThrowingRunnable runnable) { + try { + runnable.run(); + } catch (Throwable e) { + if (expectedType.isInstance(e)) { + return expectedType.cast(e); + } + AssertionFailedError assertion = + new AssertionFailedError("Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + e); + assertion.initCause(e); + throw assertion; + } + throw new AssertionFailedError("Expected exception "+ expectedType.getSimpleName() + 
" but no exception was thrown"); + } } diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..d38b03a4c2a48fae69aafa2650c1320c45e04a9c GIT binary patch literal 181 zcmb2|=3oek-yg%k{Pyxit|kMKV;|*G^(V~TDf`{VjZ^o7)7t0xOCFE7?J z+qStj+Gfsnv&lO5bH$XE9Ix7BT?{{28_#~sYJH}`j|FaPHs8zb(Yt=>^3u1b)V5wa zk{rDK?(xdZsL{U2u4>#TqJr+Ue6jXm=x g?GK;+Z~vM5r*_+)muEl%ugjRDkMK(|XfQAU00Q_^Bme*a literal 0 HcmV?d00001 diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9ac1da5e181468368b88b95170845255e071169b GIT binary patch literal 238 zcmb2|=3p>t-yg%k{PvnJUz34|+eN*l(i`^t(?8I8`v6C$gd$_fUo$6}84YG)LSL;X z8)Y;U-DjA%@u=X$?bGguo3B}NW<%eOE61w*Z)KeK)GF#K-2G>@VcE-HdwADh3UiGv z|NCU$hP68B?~La?-dM*uF|_@%8<&t*%WBtU58wVfSEcLwBHyF>OZ?&D|GB~6UGJ7_ zvMcV}UwUi7e<`)!?`IWH%;0$R+j{kddeP5+zaO_c$m8<$y3v>Kr?P=sUjGZNJ8NI< m_CxO6jE;X=e;A{VX`U5){ copyLog4jProperties(buildDefaultLog4jConfig, xpackModule) } -/***************************************************************************** - * JDKs * - *****************************************************************************/ -// extract the bundled jdk version, broken into elements as: [feature, interim, update, build] -// Note the "patch" version is not yet handled here, as it has not yet been used by java. -Pattern JDK_VERSION = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+)@([a-f0-9]{32})?") -Matcher jdkVersionMatcher = JDK_VERSION.matcher(VersionProperties.bundledJdk) -if (jdkVersionMatcher.matches() == false) { - throw new IllegalArgumentException("Malformed jdk version [" + VersionProperties.bundledJdk + "]") -} -String jdkVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : "") -String jdkMajor = jdkVersionMatcher.group(1) -String jdkBuild = jdkVersionMatcher.group(3) -String hash = jdkVersionMatcher.group(4) - -repositories { - // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back - ivy { - name "legacy-jdk" - url "https://download.oracle.com" - metadataSources { - artifact() - } - patternLayout { - artifact "java/GA/jdk${jdkMajor}/${jdkBuild}/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" - } - } - // current pattern since 12.0.1 - ivy { - name "jdk" - url "https://download.oracle.com" - metadataSources { - artifact() - } - patternLayout { - artifact "java/GA/jdk${jdkVersion}/${hash}/${jdkBuild}/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" - } - } -} -for (String platform : ['linux', 'darwin', 'windows']) { - String jdkConfigName = "jdk_${platform}" - Configuration jdkConfig = configurations.create(jdkConfigName) - String extension = platform.equals('windows') ? 'zip' : 'tar.gz' - dependencies.add(jdkConfigName, "jdk:${platform.equals('darwin') ? 'osx' : platform}:${jdkVersion}@${extension}") - - int rootNdx = platform.equals('darwin') ? 
2 : 1
-    Closure removeRootDir = {
-        it.eachFile { FileCopyDetails details ->
-            details.relativePath = new RelativePath(true, details.relativePath.segments[rootNdx..-1] as String[])
-        }
-        it.includeEmptyDirs false
-    }
-    String extractDir = "${buildDir}/jdks/openjdk-${jdkVersion}_${platform}"
-    project.task("extract${platform.capitalize()}Jdk", type: Copy) {
-        doFirst {
-            project.delete(extractDir)
-        }
-        into extractDir
-        if (extension.equals('zip')) {
-            from({ zipTree(jdkConfig.singleFile) }, removeRootDir)
-        } else {
-            from({ tarTree(resources.gzip(jdkConfig.singleFile)) }, removeRootDir)
-        }
-    }
-}
-
 // make sure we have a clean task since we aren't a java project, but we have tasks that
 // put stuff in the build dir
 task clean(type: Delete) {
@@ -292,6 +224,9 @@ task clean(type: Delete) {
 }
 
 configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
+
+  apply plugin: 'elasticsearch.jdk-download'
+
   // TODO: the map needs to be an input of the tasks, so that when it changes, the task will re-run...
   /*****************************************************************************
    *             Properties to expand when copying packaging files             *
@@ -433,9 +368,15 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
       }
     }
 
-    jdkFiles = { platform ->
-      copySpec {
-        from project(':distribution').tasks.getByName("extract${platform.capitalize()}Jdk")
+    jdkFiles = { project, platform ->
+      project.jdks {
+        "bundled_${platform}" {
+          it.platform = platform
+          it.version = VersionProperties.bundledJdk
+        }
+      }
+      return copySpec {
+        from project.jdks."bundled_${platform}"
         eachFile { FileCopyDetails details ->
           if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') {
             details.mode = 0755
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 6acef5adc5321..72804c7a907c9 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -143,7 +143,7 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk) {
       }
       if (jdk) {
         into('jdk') {
-          with jdkFiles('linux')
+          with jdkFiles(project, 'linux')
         }
       }
       // we need to specify every intermediate directory in these paths so the package managers know they are explicitly

From 7276fb624bcb07abff8b16b92091746331713779 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Wed, 8 May 2019 07:31:27 -0700
Subject: [PATCH 022/321] Update lintian overrides (#41561)

The deb package has been updated several times in the past to contain
overrides in order to pass lintian inspection. However, there have never
been any tests to ensure we do not regress back into failure. This commit
updates the overrides file given things that have changed since 2.x like
adding ML and bundling the jdk.

closes #17185
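For context on what the test added in this patch does: lintian inspects a built .deb and reports
tags; the override file shipped under /usr/share/lintian/overrides suppresses the tags we accept
deliberately, and the new packaging test fails the build if any unsuppressed tag remains. A minimal,
self-contained sketch of that invocation (plain ProcessBuilder, and the .deb path here is a
hypothetical placeholder, not the packaging framework used in the patch below):

import java.io.IOException;

public class LintianCheck {
    // Runs "lintian --fail-on-warnings" against a .deb and fails on a non-zero exit,
    // mirroring what the packaging test below does through its shell helper.
    public static void main(String[] args) throws IOException, InterruptedException {
        String deb = args.length > 0 ? args[0] : "build/distributions/elasticsearch.deb"; // hypothetical path
        Process p = new ProcessBuilder("lintian", "--fail-on-warnings", deb)
            .inheritIO() // stream lintian's tag report to the console
            .start();
        int exit = p.waitFor();
        if (exit != 0) {
            throw new AssertionError("lintian reported unsuppressed tags (exit " + exit + ")");
        }
    }
}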
---
 Vagrantfile                                        |  6 +-
 distribution/packages/build.gradle                 | 17 ++++--
 .../packages/src/deb/lintian/elasticsearch        | 56 ++++++++++++++---
 .../packaging/test/PackageTestCase.java            |  6 ++
 4 files changed, 71 insertions(+), 14 deletions(-)

diff --git a/Vagrantfile b/Vagrantfile
index ef64eaa0071fc..3bc29005f9b01 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -158,13 +158,17 @@ def deb_common(config, name, extra: '')
     s.privileged = false
     s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile"
   end
+  extra_with_lintian = <<-SHELL
+    install lintian
+    #{extra}
+  SHELL
   linux_common(
     config,
     name,
     update_command: 'apt-get update',
     update_tracking_file: '/var/cache/apt/archives/last_update',
     install_command: 'apt-get install -y',
-    extra: extra
+    extra: extra_with_lintian
   )
 end

diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 72804c7a907c9..136803a5d83ea 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -270,13 +270,13 @@ apply plugin: 'nebula.ospackage-base'
 
 // this is package independent configuration
 ospackage {
   maintainer 'Elasticsearch Team <info@elastic.co>'
-  summary '''
-    Elasticsearch is a distributed RESTful search engine built for the cloud.
+  summary 'Distributed RESTful search engine built for the cloud'
+  packageDescription '''
     Reference documentation can be found at
     https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html
     and the 'Elasticsearch: The Definitive Guide' book can be found at
     https://www.elastic.co/guide/en/elasticsearch/guide/current/index.html
-  '''.stripIndent().replace('\n', ' ').trim()
+  '''.stripIndent().trim()
   url 'https://www.elastic.co/'
 
   // signing setup
@@ -288,7 +288,8 @@ ospackage {
       new File(new File(System.getProperty('user.home'), '.gnupg'), 'secring.gpg')
   }
 
-  requires('coreutils')
+  // version found on oldest supported distro, centos-6
+  requires('coreutils', '8.4', GREATER | EQUAL)
 
   fileMode 0644
   dirMode 0755
@@ -312,12 +313,18 @@ Closure commonDebConfig(boolean oss, boolean jdk) {
 
     version = project.version.replace('-', '~')
     packageGroup 'web'
-    requires 'bash'
+
+    // versions found on oldest supported distro, centos-6
+    requires('bash', '4.1', GREATER | EQUAL)
+    requires('lsb-base', '4', GREATER | EQUAL)
     requires 'libc6'
     requires 'adduser'
 
     into('/usr/share/lintian/overrides') {
       from('src/deb/lintian/elasticsearch')
+      if (oss) {
+        rename('elasticsearch', 'elasticsearch-oss')
+      }
     }
   }
 }

diff --git a/distribution/packages/src/deb/lintian/elasticsearch b/distribution/packages/src/deb/lintian/elasticsearch
index 1ca52eaed2863..98038177a003b 100644
--- a/distribution/packages/src/deb/lintian/elasticsearch
+++ b/distribution/packages/src/deb/lintian/elasticsearch
@@ -1,8 +1,48 @@
-# Ignore arch dependent warnings, we chose the right libs on start
-elasticsearch binary: arch-independent-package-contains-binary-or-object
-# Not stripping external libraries
-elasticsearch binary: unstripped-binary-or-object
-# Ignore arch dependent warnings, we chose the right libs on start
-elasticsearch binary: arch-dependent-file-in-usr-share
-# Please check our changelog at http://www.elastic.co/downloads/elasticsearch
-elasticsearch binary: changelog-file-missing-in-native-package
+# we don't have a changelog, but we put our copyright file
+# under /usr/share/doc/elasticsearch, which triggers this warning
+changelog-file-missing-in-native-package
+
+# we intentionally copy our copyright file for all deb packages
+copyright-file-contains-full-apache-2-license
+copyright-should-refer-to-common-license-file-for-apache-2
+copyright-without-copyright-notice
+
+# we still put all our files under /usr/share/elasticsearch even after transition to platform dependent packages
+arch-dependent-file-in-usr-share
+
+# we have a bundled jdk, so don't use jarwrapper
+missing-dep-on-jarwrapper
+
+# we prefer to not make our config and log files world readable
+non-standard-file-perm etc/default/elasticsearch 0660 != 0644
+non-standard-dir-perm etc/elasticsearch/ 2750 != 0755
+non-standard-file-perm etc/elasticsearch/*
+non-standard-dir-perm var/lib/elasticsearch/ 2750 != 0755
+non-standard-dir-perm var/log/elasticsearch/ 2750 != 0755
+executable-is-not-world-readable etc/init.d/elasticsearch 0750
+non-standard-file-permissions-for-etc-init.d-script etc/init.d/elasticsearch 0750 != 0755
+
+# this lintian tag is simply wrong; contrary to the explanation, debian systemd
+# does actually look at /usr/lib/systemd/system
+systemd-service-file-outside-lib usr/lib/systemd/system/elasticsearch.service
+
+# we do not automatically enable the service in init.d or systemd
+script-in-etc-init.d-not-registered-via-update-rc.d etc/init.d/elasticsearch
+
+# the package scripts handle init.d/systemd directly and don't need to use deb helpers
+maintainer-script-calls-systemctl
+prerm-calls-updaterc.d elasticsearch
+
+# bundled JDK
+embedded-library
+arch-dependent-file-in-usr-share usr/share/elasticsearch/jdk/*
+unstripped-binary-or-object usr/share/elasticsearch/jdk/*
+extra-license-file usr/share/elasticsearch/jdk/legal/*
+hardening-no-pie usr/share/elasticsearch/jdk/bin/*
+hardening-no-pie usr/share/elasticsearch/jdk/lib/*
+
+# the system java version that lintian assumes is far behind what elasticsearch uses
+unknown-java-class-version
+
+# elastic licensed modules contain elastic license
+extra-license-file usr/share/elasticsearch/modules/*
diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java
index c664e28931087..1b43ebeb00a97 100644
--- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java
+++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java
@@ -56,6 +56,7 @@
 import static org.elasticsearch.packaging.util.Packages.stopElasticsearch;
 import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
 import static org.elasticsearch.packaging.util.Platforms.getOsRelease;
+import static org.elasticsearch.packaging.util.Platforms.isDPKG;
 import static org.elasticsearch.packaging.util.Platforms.isSystemd;
 import static org.elasticsearch.packaging.util.ServerUtils.makeRequest;
 import static org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests;
@@ -78,6 +79,11 @@ public void onlyCompatibleDistributions() throws Exception {
         sh = newShell();
     }
 
+    public void test05CheckLintian() throws Exception {
+        assumeTrue(isDPKG());
+        sh.run("lintian --fail-on-warnings " + FileUtils.getDistributionFile(distribution()));
+    }
+
     public void test10InstallPackage() throws Exception {
         assertRemoved(distribution());
         installation = install(distribution());

From 2151ecbba897571790eb50000fc87b9f7ba905c0 Mon Sep 17 00:00:00 2001
From: Zachary Tong
Date: Wed, 8 May 2019 12:37:43 -0400
Subject: [PATCH 023/321] Cleanup RollupSearch exceptions, disallow partial
 results (#41272)

- msearch exceptions should be thrown directly instead of wrapping in a
  RuntimeException
- Do not allow partial results (where some indices are missing), instead
  throw an exception if any index is missing
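To make the behavior change concrete before the diff: any failed item in the rollup msearch now
aborts the whole search instead of being silently dropped. A minimal sketch of that fail-fast loop,
using simplified stand-in types rather than the shipped Elasticsearch classes:

import java.util.ArrayList;
import java.util.List;

class FailFastTranslation {
    // Simplified stand-in for MultiSearchResponse.Item: either a response or a failure.
    static final class Item {
        final String response;
        final Exception failure;
        Item(String response, Exception failure) { this.response = response; this.failure = failure; }
        boolean isFailure() { return failure != null; }
    }

    // Either every item succeeded, or the first failure is rethrown; never partial results.
    static List<String> translate(Item[] items) throws Exception {
        List<String> responses = new ArrayList<>();
        for (Item item : items) {
            if (item.isFailure()) {
                throw item.failure;
            }
            responses.add(item.response);
        }
        return responses;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(translate(new Item[] { new Item("live", null), new Item("rolled", null) }));
    }
}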
---
 .../rollup/RollupResponseTranslator.java           | 100 ++++++++++--------
 .../action/TransportRollupSearchAction.java        |   2 +-
 .../RollupResponseTranslationTests.java            |  45 ++++----
 .../rollup/action/SearchActionTests.java           |   6 +-
 .../test/rollup/rollup_search.yml                  |  45 ++++++++
 5 files changed, 127 insertions(+), 71 deletions(-)

diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java
index e900d76c84913..4a8d007e3b898 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java
@@ -8,6 +8,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.search.MultiSearchResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
@@ -61,9 +62,9 @@ public class RollupResponseTranslator {
      * Verifies a live-only search response.  Essentially just checks for failure then returns
      * the response since we have no work to do
      */
-    public static SearchResponse verifyResponse(MultiSearchResponse.Item normalResponse) {
+    public static SearchResponse verifyResponse(MultiSearchResponse.Item normalResponse) throws Exception {
         if (normalResponse.isFailure()) {
-            throw new RuntimeException(normalResponse.getFailureMessage(), normalResponse.getFailure());
+            throw normalResponse.getFailure();
         }
         return normalResponse.getResponse();
     }
@@ -77,16 +78,30 @@ public static SearchResponse verifyResponse(MultiSearchResponse.Item normalRespo
      * on the translation conventions
      */
     public static SearchResponse translateResponse(MultiSearchResponse.Item[] rolledMsearch,
-                                                   InternalAggregation.ReduceContext reduceContext) {
+                                                   InternalAggregation.ReduceContext reduceContext) throws Exception {
+
+        assert rolledMsearch.length > 0;
+        List<SearchResponse> responses = new ArrayList<>();
+        for (MultiSearchResponse.Item item : rolledMsearch) {
+            if (item.isFailure()) {
+                Exception e = item.getFailure();
+
+                // If an index was deleted after execution, give a hint to the user that this is a transient error
+                if (e instanceof IndexNotFoundException) {
+                    throw new ResourceNotFoundException("Index [" + ((IndexNotFoundException) e).getIndex().getName() +
+                        "] was not found, likely because it was deleted while the request was in-flight. " +
+                        "Rollup does not support partial search results, please try the request again.");
+                }
 
-        List<SearchResponse> responses = Arrays.stream(rolledMsearch)
-                .map(item -> {
-                    if (item.isFailure()) {
-                        throw new RuntimeException(item.getFailureMessage(), item.getFailure());
-                    }
-                    return item.getResponse();
-                }).collect(Collectors.toList());
+                // Otherwise just throw
+                throw e;
+            }
+
+            // No error, add to responses
+            responses.add(item.getResponse());
+        }
+
+        assert responses.size() > 0;
         return doCombineResponse(null, responses, reduceContext);
     }
@@ -187,48 +202,45 @@ public static SearchResponse translateResponse(MultiSearchResponse.Item[] rolled
      * @param msearchResponses The responses from the msearch, where the first response is the live-index response
     */
    public static SearchResponse combineResponses(MultiSearchResponse.Item[] msearchResponses,
-                                                  InternalAggregation.ReduceContext reduceContext) {
-        boolean liveMissing = false;
+                                                  InternalAggregation.ReduceContext reduceContext) throws Exception {
         assert msearchResponses.length >= 2;
 
-        // The live response is always first
-        MultiSearchResponse.Item liveResponse = msearchResponses[0];
-        if (liveResponse.isFailure()) {
-            Exception e = liveResponse.getFailure();
-            // If we have a rollup response we can tolerate a missing live response
-            if (e instanceof IndexNotFoundException) {
-                logger.warn("\"Live\" index not found during rollup search.", e);
-                liveMissing = true;
+        boolean first = true;
+        SearchResponse liveResponse = null;
+        List<SearchResponse> rolledResponses = new ArrayList<>();
+        for (MultiSearchResponse.Item item : msearchResponses) {
+            if (item.isFailure()) {
+                Exception e = item.getFailure();
+
+                // If an index was deleted after execution, give a hint to the user that this is a transient error
+                if (e instanceof IndexNotFoundException) {
+                    throw new ResourceNotFoundException("Index [" + ((IndexNotFoundException) e).getIndex() + "] was not found, " +
+                        "likely because it was deleted while the request was in-flight. Rollup does not support partial search results, " +
+                        "please try the request again.", e);
+                }
+
+                // Otherwise just throw
+                throw e;
+            }
+
+            // No error, add to responses
+            if (first) {
+                liveResponse = item.getResponse();
             } else {
-                throw new RuntimeException(liveResponse.getFailureMessage(), liveResponse.getFailure());
+                rolledResponses.add(item.getResponse());
             }
+            first = false;
         }
 
-        List<SearchResponse> rolledResponses = Arrays.stream(msearchResponses)
-                .skip(1)
-                .map(item -> {
-                    if (item.isFailure()) {
-                        Exception e = item.getFailure();
-                        // If we have a normal response we can tolerate a missing rollup response, although it theoretically
-                        // should be handled by a different code path (verifyResponse)
-                        if (e instanceof IndexNotFoundException) {
-                            logger.warn("Rollup index not found during rollup search.", e);
-                        } else {
-                            throw new RuntimeException(item.getFailureMessage(), item.getFailure());
-                        }
-                        return null;
-                    } else {
-                        return item.getResponse();
-                    }
-                }).filter(Objects::nonNull).collect(Collectors.toList());
-
-        // If we only have a live index left, process it directly
-        if (rolledResponses.isEmpty() && liveMissing == false) {
-            return verifyResponse(liveResponse);
-        } else if (rolledResponses.isEmpty() && liveMissing) {
-            throw new RuntimeException("No indices (live or rollup) found during rollup search");
+        // If we only have a live index left, just return it directly. We know it can't be an error already
+        if (rolledResponses.isEmpty() && liveResponse != null) {
+            return liveResponse;
+        } else if (rolledResponses.isEmpty()) {
+            throw new ResourceNotFoundException("No indices (live or rollup) found during rollup search");
         }
 
-        return doCombineResponse(liveResponse.getResponse(), rolledResponses, reduceContext);
+        return doCombineResponse(liveResponse, rolledResponses, reduceContext);
     }
 
     private static SearchResponse doCombineResponse(SearchResponse liveResponse, List<SearchResponse> rolledResponses,
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
index 414a0d08ef35a..2a1308353d6ad 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
@@ -111,7 +111,7 @@ protected void doExecute(Task task, SearchRequest request, ActionListener<SearchResponse> listener) {
-                () -> RollupResponseTranslator.combineResponses(failure,
+                () -> RollupResponseTranslator.translateResponse(failure,
                     new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
         assertThat(e.getMessage(), equalTo("rollup failure"));
     }
 
@@ -130,13 +130,14 @@ public void testLiveMissingRollupMissing() {
         BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
         ScriptService scriptService = mock(ScriptService.class);
 
-        Exception e = expectThrows(RuntimeException.class,
+        ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class,
             () -> RollupResponseTranslator.combineResponses(failure,
                 new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
-        assertThat(e.getMessage(), equalTo("No indices (live or rollup) found during rollup search"));
+        assertThat(e.getMessage(), equalTo("Index [[foo]] was not found, likely because it was deleted while the request was in-flight. " +
+            "Rollup does not support partial search results, please try the request again."));
     }
 
-    public void testMissingLiveIndex() {
+    public void testMissingLiveIndex() throws Exception {
         SearchResponse responseWithout = mock(SearchResponse.class);
         when(responseWithout.getTook()).thenReturn(new TimeValue(100));
         List<InternalAggregation> aggTree = new ArrayList<>(1);
@@ -175,16 +176,13 @@ public void testMissingLiveIndex() {
         BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
         ScriptService scriptService = mock(ScriptService.class);
 
-        SearchResponse response = RollupResponseTranslator.combineResponses(msearch,
-            new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
-        assertNotNull(response);
-        Aggregations responseAggs = response.getAggregations();
-        assertNotNull(responseAggs);
-        Avg avg = responseAggs.get("foo");
-        assertThat(avg.getValue(), equalTo(5.0));
+        ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> RollupResponseTranslator.combineResponses(msearch,
+            new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
+        assertThat(e.getMessage(), equalTo("Index [[foo]] was not found, likely because it was deleted while the request was in-flight. " +
+            "Rollup does not support partial search results, please try the request again."));
     }
 
-    public void testRolledMissingAggs() {
+    public void testRolledMissingAggs() throws Exception {
         SearchResponse responseWithout = mock(SearchResponse.class);
         when(responseWithout.getTook()).thenReturn(new TimeValue(100));
 
@@ -192,13 +190,12 @@ public void testRolledMissingAggs() {
         when(responseWithout.getAggregations()).thenReturn(mockAggsWithout);
 
         MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{
-            new MultiSearchResponse.Item(null, new IndexNotFoundException("foo")),
             new MultiSearchResponse.Item(responseWithout, null)};
 
         BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
         ScriptService scriptService = mock(ScriptService.class);
 
-        SearchResponse response = RollupResponseTranslator.combineResponses(msearch,
+        SearchResponse response = RollupResponseTranslator.translateResponse(msearch,
             new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
         assertNotNull(response);
         Aggregations responseAggs = response.getAggregations();
@@ -215,12 +212,13 @@ public void testMissingRolledIndex() {
         BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
         ScriptService scriptService = mock(ScriptService.class);
 
-        SearchResponse finalResponse = RollupResponseTranslator.combineResponses(msearch,
-            new InternalAggregation.ReduceContext(bigArrays, scriptService, true));
-        assertThat(finalResponse, equalTo(response));
+        ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> RollupResponseTranslator.combineResponses(msearch,
+            new InternalAggregation.ReduceContext(bigArrays, scriptService, true)));
+        assertThat(e.getMessage(), equalTo("Index [[foo]] was not found, likely because it was deleted while the request was in-flight. " +
+            "Rollup does not support partial search results, please try the request again."));
     }
 
-    public void testVerifyNormal() {
+    public void testVerifyNormal() throws Exception {
         SearchResponse response = mock(SearchResponse.class);
         MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null);
 
@@ -235,7 +233,7 @@ public void testVerifyMissingNormal() {
         assertThat(e.getMessage(), equalTo("no such index [foo]"));
     }
 
-    public void testTranslateRollup() {
+    public void testTranslateRollup() throws Exception {
         SearchResponse response = mock(SearchResponse.class);
         when(response.getTook()).thenReturn(new TimeValue(100));
         List<InternalAggregation> aggTree = new ArrayList<>(1);
@@ -286,9 +284,10 @@ public void testTranslateMissingRollup() {
         ScriptService scriptService = mock(ScriptService.class);
         InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
 
-        Exception e = expectThrows(RuntimeException.class,
+        ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class,
             () -> RollupResponseTranslator.translateResponse(new MultiSearchResponse.Item[]{missing}, context));
-        assertThat(e.getMessage(), equalTo("no such index [foo]"));
+        assertThat(e.getMessage(), equalTo("Index [foo] was not found, likely because it was deleted while the request was in-flight. " +
+            "Rollup does not support partial search results, please try the request again."));
     }
 
     public void testMissingFilter() {
@@ -351,7 +350,7 @@ public void testMatchingNameNotFilter() {
             equalTo("Expected [filter_foo] to be a FilterAggregation, but was [InternalMax]"));
     }
 
-    public void testSimpleReduction() {
+    public void testSimpleReduction() throws Exception {
         SearchResponse protoResponse = mock(SearchResponse.class);
         when(protoResponse.getTook()).thenReturn(new TimeValue(100));
         List<InternalAggregation> protoAggTree = new ArrayList<>(1);
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
index b1ae36c538fec..00f2fbd3171d4 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
@@ -594,7 +594,7 @@ public void testMatchingIndexInMetadata() throws IOException {
         assertThat(result.getJobCaps().size(), equalTo(1));
     }
 
-    public void testLiveOnlyProcess() {
+    public void testLiveOnlyProcess() throws Exception {
         String[] indices = new String[]{"foo"};
         IndexMetaData indexMetaData = mock(IndexMetaData.class);
         ImmutableOpenMap.Builder<String, IndexMetaData> meta = ImmutableOpenMap.builder(1);
@@ -611,7 +611,7 @@ public void testLiveOnlyProcess() {
         assertThat(r, equalTo(response));
     }
 
-    public void testRollupOnly() throws IOException {
+    public void testRollupOnly() throws Exception {
         String[] indices = new String[]{"foo"};
 
         String jobName = randomAlphaOfLength(5);
@@ -711,7 +711,7 @@ public void testEmptyMsearch() {
         assertThat(e.getMessage(), equalTo("MSearch response was empty, cannot unroll RollupSearch results"));
     }
 
-    public void testBoth() throws IOException {
+    public void testBoth() throws Exception {
         String[] indices = new String[]{"foo", "bar"};
 
         String jobName = randomAlphaOfLength(5);
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml
index 0e052d33281e2..ca04327eab729 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml
@@ -1244,4 +1244,49 @@ setup:
   - match: { aggregations.date_histogram#histo.buckets.3.doc_count: 20 }
   - match: { aggregations.date_histogram#histo.buckets.3.max#the_max.value: 4 }
 
+---
+"Search error against live index":
+
+  - do:
+      catch: bad_request
+      rollup.rollup_search:
+        index: "foo"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "asdfasdf"
+
+
+---
+"Search error against rollup and live index":
+
+  - do:
+      catch: bad_request
+      rollup.rollup_search:
+        index: "foo*"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "asdfasdf"
+
+---
+"Search error no matching indices":
+
+  - do:
+      catch: /Must specify at least one concrete index/
+      rollup.rollup_search:
+        index: "bar*"
+        body:
+          size: 0
+          aggs:
+            histo:
+              date_histogram:
+                field: "timestamp"
+                interval: "1h"

From 6bf8b19e8643e138f252c87f120629ea1a13a607 Mon Sep 17 00:00:00 2001
From: Jack Conradson
Date: Wed, 8 May 2019 10:29:02 -0700
Subject: [PATCH 024/321] Add static section whitelist info to api docs
 generation (#41870)

This change adds imported methods, class bindings, and instance bindings
to the documentation generation for the Painless Context
APIs. --- .../painless-api-reference/index.asciidoc | 3 +- .../index.asciidoc | 4 + .../packages.asciidoc | 2 +- .../index.asciidoc | 17 ++ .../packages.asciidoc | 3 + .../index.asciidoc | 4 + .../packages.asciidoc | 2 +- .../index.asciidoc | 4 + .../packages.asciidoc | 2 +- .../index.asciidoc | 25 +++ .../packages.asciidoc | 2 +- .../index.asciidoc | 4 + .../packages.asciidoc | 42 ++-- .../painless/ContextDocGenerator.java | 207 +++++++++++++++--- 14 files changed, 261 insertions(+), 60 deletions(-) create mode 100644 docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc create mode 100644 docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc diff --git a/docs/painless/painless-api-reference/index.asciidoc b/docs/painless/painless-api-reference/index.asciidoc index 88130f7fdfc02..652904533223e 100644 --- a/docs/painless/painless-api-reference/index.asciidoc +++ b/docs/painless/painless-api-reference/index.asciidoc @@ -10,7 +10,7 @@ |Aggs Reduce | <> | |Analysis | <> | <> |Bucket Aggregation | <> | -|Field | <> | +|Field | <> | <> |Filter | <> | |Ingest | <> | <> |Interval | <> | @@ -33,6 +33,7 @@ include::painless-api-reference-shared/index.asciidoc[] include::painless-api-reference-analysis/index.asciidoc[] +include::painless-api-reference-field/index.asciidoc[] include::painless-api-reference-ingest/index.asciidoc[] include::painless-api-reference-moving-function/index.asciidoc[] include::painless-api-reference-score/index.asciidoc[] diff --git a/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc index 8dc729b31ea1f..d09af700a2fdc 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc @@ -7,6 +7,10 @@ The following specialized API is available in the Analysis context. * See the <> for further API available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. + + ==== org.elasticsearch.analysis.common <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc index 106f9272df4a8..ff272cb228f6f 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-analysis-org-elasticsearch-analysis-common"] === Analysis API for package org.elasticsearch.analysis.common -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-analysis-AnalysisPredicateScript-Token]] ==== AnalysisPredicateScript.Token diff --git a/docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc new file mode 100644 index 0000000000000..eb71e71ccf165 --- /dev/null +++ b/docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc @@ -0,0 +1,17 @@ +// This file is auto-generated. Do not edit. 
+ +[[painless-api-reference-field]] +=== Field API + +The following specialized API is available in the Field context. + +* See the <> for further API available in all contexts. + +==== Static Methods +The following methods are directly callable without a class/instance qualifier. Note parameters denoted by a (*) are treated as read-only values. + +* List domainSplit(String) +* List domainSplit(String, Map) + +include::packages.asciidoc[] + diff --git a/docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc new file mode 100644 index 0000000000000..282fcf136a65c --- /dev/null +++ b/docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc @@ -0,0 +1,3 @@ +// This file is auto-generated. Do not edit. + + diff --git a/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc index e4067c24dcea0..ff70233defb0c 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc @@ -7,6 +7,10 @@ The following specialized API is available in the Ingest context. * See the <> for further API available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. + + ==== org.elasticsearch.ingest.common <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc index b6a48ee7d5d2e..a4a5a4529cc95 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-ingest-org-elasticsearch-ingest-common"] === Ingest API for package org.elasticsearch.ingest.common -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-ingest-Processors]] ==== Processors diff --git a/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc index 9d37e81a94fc7..93a88519d65ce 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc @@ -7,6 +7,10 @@ The following specialized API is available in the Moving Function context. * See the <> for further API available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. 
+ + ==== org.elasticsearch.search.aggregations.pipeline <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc index 824aa23f7ebfe..bdd8b1fd73ce2 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-moving-function-org-elasticsearch-search-aggregations-pipeline"] === Moving Function API for package org.elasticsearch.search.aggregations.pipeline -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-moving-function-MovingFunctions]] ==== MovingFunctions diff --git a/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc index fe9e0e1d23505..d355a495e0625 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc @@ -7,6 +7,31 @@ The following specialized API is available in the Score context. * See the <> for further API available in all contexts. +==== Static Methods +The following methods are directly callable without a class/instance qualifier. Note parameters denoted by a (*) are treated as read-only values. + +* double cosineSimilarity(List *, VectorScriptDocValues.DenseVectorScriptDocValues) +* double cosineSimilaritySparse(Map *, VectorScriptDocValues.SparseVectorScriptDocValues) +* double decayDateExp(String *, String *, String *, double *, JodaCompatibleZonedDateTime) +* double decayDateGauss(String *, String *, String *, double *, JodaCompatibleZonedDateTime) +* double decayDateLinear(String *, String *, String *, double *, JodaCompatibleZonedDateTime) +* double decayGeoExp(String *, String *, String *, double *, GeoPoint) +* double decayGeoGauss(String *, String *, String *, double *, GeoPoint) +* double decayGeoLinear(String *, String *, String *, double *, GeoPoint) +* double decayNumericExp(double *, double *, double *, double *, double) +* double decayNumericGauss(double *, double *, double *, double *, double) +* double decayNumericLinear(double *, double *, double *, double *, double) +* double dotProduct(List, VectorScriptDocValues.DenseVectorScriptDocValues) +* double dotProductSparse(Map *, VectorScriptDocValues.SparseVectorScriptDocValues) +* double randomScore(int *) +* double randomScore(int *, String *) +* double saturation(double, double) +* double sigmoid(double, double, double) + +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. 
+ + ==== org.elasticsearch.index.query <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc index 287f7a223ca5e..10f0f1b6daeab 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-score-org-elasticsearch-index-query"] === Score API for package org.elasticsearch.index.query -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-score-VectorScriptDocValues]] ==== VectorScriptDocValues diff --git a/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc index c349602a7b580..d5452ce8fab96 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc @@ -5,6 +5,10 @@ The following API is available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. + + ==== java.lang <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc index ed6e10e7b193c..f692141051200 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-shared-java-lang"] === Shared API for package java.lang -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Appendable]] ==== Appendable @@ -1399,7 +1399,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-math"] === Shared API for package java.math -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BigDecimal]] ==== BigDecimal @@ -1557,7 +1557,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-text"] === Shared API for package java.text -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Annotation]] ==== Annotation @@ -2265,7 +2265,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time"] === Shared API for package java.time -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Clock]] ==== Clock @@ -3078,7 +3078,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-chrono"] === Shared API for package java.time.chrono -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
[[painless-api-reference-shared-AbstractChronology]] ==== AbstractChronology @@ -3675,7 +3675,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-format"] === Shared API for package java.time.format -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-DateTimeFormatter]] ==== DateTimeFormatter @@ -3874,7 +3874,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-temporal"] === Shared API for package java.time.temporal -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ChronoField]] ==== ChronoField @@ -4166,7 +4166,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-zone"] === Shared API for package java.time.zone -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ZoneOffsetTransition]] ==== ZoneOffsetTransition @@ -4265,7 +4265,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util"] === Shared API for package java.util -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-AbstractCollection]] ==== AbstractCollection @@ -7194,7 +7194,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util-function"] === Shared API for package java.util.function -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BiConsumer]] ==== BiConsumer @@ -7582,7 +7582,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util-regex"] === Shared API for package java.util.regex -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Matcher]] ==== Matcher @@ -7635,7 +7635,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util-stream"] === Shared API for package java.util.stream -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BaseStream]] ==== BaseStream @@ -7957,7 +7957,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-apache-lucene-util"] === Shared API for package org.apache.lucene.util -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BytesRef]] ==== BytesRef @@ -7974,7 +7974,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-common-geo"] === Shared API for package org.elasticsearch.common.geo -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
[[painless-api-reference-shared-GeoPoint]] ==== GeoPoint @@ -7987,7 +7987,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-fielddata"] === Shared API for package org.elasticsearch.index.fielddata -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ScriptDocValues-Booleans]] ==== ScriptDocValues.Booleans @@ -8386,7 +8386,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-mapper"] === Shared API for package org.elasticsearch.index.mapper -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-IpFieldMapper-IpFieldType-IpScriptDocValues]] ==== IpFieldMapper.IpFieldType.IpScriptDocValues @@ -8445,7 +8445,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-query"] === Shared API for package org.elasticsearch.index.query -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-IntervalFilterScript-Interval]] ==== IntervalFilterScript.Interval @@ -8459,7 +8459,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-similarity"] === Shared API for package org.elasticsearch.index.similarity -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ScriptedSimilarity-Doc]] ==== ScriptedSimilarity.Doc @@ -8499,7 +8499,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-painless-api"] === Shared API for package org.elasticsearch.painless.api -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Debug]] ==== Debug @@ -8511,7 +8511,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-script"] === Shared API for package org.elasticsearch.script -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-JodaCompatibleZonedDateTime]] ==== JodaCompatibleZonedDateTime @@ -8594,7 +8594,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-search-lookup"] === Shared API for package org.elasticsearch.search.lookup -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
[[painless-api-reference-shared-FieldLookup]] ==== FieldLookup diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java index fbea8d91726c9..babc3e10e55db 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.painless.action.PainlessContextClassBindingInfo; import org.elasticsearch.painless.action.PainlessContextClassInfo; import org.elasticsearch.painless.action.PainlessContextConstructorInfo; import org.elasticsearch.painless.action.PainlessContextFieldInfo; import org.elasticsearch.painless.action.PainlessContextInfo; +import org.elasticsearch.painless.action.PainlessContextInstanceBindingInfo; import org.elasticsearch.painless.action.PainlessContextMethodInfo; import java.io.IOException; @@ -69,26 +71,30 @@ public final class ContextDocGenerator { public static void main(String[] args) throws IOException { List contextInfos = getContextInfos(); - Set sharedClassInfos = createShared(contextInfos); + Set sharedStaticInfos = createSharedStatics(contextInfos); + Set sharedClassInfos = createSharedClasses(contextInfos); Path rootDir = resetRootDir(); Path sharedDir = createSharedDir(rootDir); - List classInfos = sortClassInfos(new ArrayList<>(sharedClassInfos), Collections.emptySet()); + List staticInfos = sortStaticInfos(Collections.emptySet(), new ArrayList<>(sharedStaticInfos)); + List classInfos = sortClassInfos(Collections.emptySet(), new ArrayList<>(sharedClassInfos)); Map javaNamesToDisplayNames = getDisplayNames(classInfos); - printSharedIndexPage(sharedDir, javaNamesToDisplayNames, classInfos); + printSharedIndexPage(sharedDir, javaNamesToDisplayNames, staticInfos, classInfos); printSharedPackagesPages(sharedDir, javaNamesToDisplayNames, classInfos); Set isSpecialized = new HashSet<>(); for (PainlessContextInfo contextInfo : contextInfos) { - Path contextDir = createContextDir(rootDir, contextInfo); - classInfos = sortClassInfos(new ArrayList<>(contextInfo.getClasses()), sharedClassInfos); + staticInfos = createContextStatics(contextInfo); + staticInfos = sortStaticInfos(sharedStaticInfos, staticInfos); + classInfos = sortClassInfos(sharedClassInfos, new ArrayList<>(contextInfo.getClasses())); - if (classInfos.isEmpty() == false) { + if (staticInfos.isEmpty() == false || classInfos.isEmpty() == false) { + Path contextDir = createContextDir(rootDir, contextInfo); isSpecialized.add(contextInfo); javaNamesToDisplayNames = getDisplayNames(contextInfo.getClasses()); - printContextIndexPage(contextDir, javaNamesToDisplayNames, sharedClassInfos, contextInfo, classInfos); + printContextIndexPage(contextDir, javaNamesToDisplayNames, contextInfo, staticInfos, classInfos); printContextPackagesPages(contextDir, javaNamesToDisplayNames, sharedClassInfos, contextInfo, classInfos); } } @@ -123,12 +129,44 @@ private static List getContextInfos() throws IOException { return contextInfos; } - private static Set createShared(List contextInfos) { + private static Set createSharedStatics(List contextInfos) { + Map staticInfoCounts = new HashMap<>(); + + for (PainlessContextInfo contextInfo : contextInfos) { + for 
(PainlessContextMethodInfo methodInfo : contextInfo.getImportedMethods()) { + staticInfoCounts.merge(methodInfo, 1, Integer::sum); + } + + for (PainlessContextClassBindingInfo classBindingInfo : contextInfo.getClassBindings()) { + staticInfoCounts.merge(classBindingInfo, 1, Integer::sum); + } + + for (PainlessContextInstanceBindingInfo instanceBindingInfo : contextInfo.getInstanceBindings()) { + staticInfoCounts.merge(instanceBindingInfo, 1, Integer::sum); + } + } + + return staticInfoCounts.entrySet().stream().filter( + e -> e.getValue() == contextInfos.size() + ).map(Map.Entry::getKey).collect(Collectors.toSet()); + } + + private static List createContextStatics(PainlessContextInfo contextInfo) { + List staticInfos = new ArrayList<>(); + + staticInfos.addAll(contextInfo.getImportedMethods()); + staticInfos.addAll(contextInfo.getClassBindings()); + staticInfos.addAll(contextInfo.getInstanceBindings()); + + return staticInfos; + } + + private static Set createSharedClasses(List contextInfos) { Map classInfoCounts = new HashMap<>(); for (PainlessContextInfo contextInfo : contextInfos) { for (PainlessContextClassInfo classInfo : contextInfo.getClasses()) { - classInfoCounts.compute(classInfo, (k, v) -> v == null ? 1 : v + 1); + classInfoCounts.merge(classInfo, 1, Integer::sum); } } @@ -165,8 +203,8 @@ private static void printAutomatedMessage(PrintStream stream) { stream.println(); } - private static void printSharedIndexPage( - Path sharedDir, Map javaNamesToDisplayNames, List classInfos) throws IOException { + private static void printSharedIndexPage(Path sharedDir, Map javaNamesToDisplayNames, + List staticInfos, List classInfos) throws IOException { Path sharedIndexPath = sharedDir.resolve("index.asciidoc"); @@ -181,13 +219,12 @@ private static void printSharedIndexPage( sharedIndexStream.println(); sharedIndexStream.println("The following API is available in all contexts."); - printIndex(sharedIndexStream, SHARED_HEADER, javaNamesToDisplayNames, Collections.emptySet(), classInfos); + printIndex(sharedIndexStream, SHARED_HEADER, javaNamesToDisplayNames, staticInfos, classInfos); } } private static void printContextIndexPage(Path contextDir, Map javaNamesToDisplayNames, - Set excludes, PainlessContextInfo contextInfo, List classInfos) - throws IOException { + PainlessContextInfo contextInfo, List staticInfos, List classInfos) throws IOException { Path contextIndexPath = contextDir.resolve("index.asciidoc"); @@ -205,34 +242,58 @@ private static void printContextIndexPage(Path contextDir, Map j contextIndexStream.println( "* See the <<" + SHARED_HEADER + ", " + SHARED_NAME + " API>> for further API available in all contexts."); - printIndex(contextIndexStream, getContextHeader(contextInfo), javaNamesToDisplayNames, excludes, classInfos); + printIndex(contextIndexStream, getContextHeader(contextInfo), javaNamesToDisplayNames, staticInfos, classInfos); } } private static void printIndex(PrintStream indexStream, String contextHeader, Map javaNamesToDisplayNames, - Set excludes, List classInfos) { + List staticInfos, List classInfos) { String currentPackageName = null; - for (PainlessContextClassInfo classInfo : classInfos) { - if (excludes.contains(classInfo)) { - continue; + if (staticInfos.isEmpty() == false) { + indexStream.println(); + indexStream.println("==== Static Methods"); + indexStream.println("The following methods are directly callable without a class/instance qualifier. 
" + + "Note parameters denoted by a (*) are treated as read-only values."); + indexStream.println(); + + for (Object staticInfo : staticInfos) { + if (staticInfo instanceof PainlessContextMethodInfo) { + printMethod(indexStream, javaNamesToDisplayNames, false, (PainlessContextMethodInfo)staticInfo); + } else if (staticInfo instanceof PainlessContextClassBindingInfo) { + printClassBinding(indexStream, javaNamesToDisplayNames, (PainlessContextClassBindingInfo)staticInfo); + } else if (staticInfo instanceof PainlessContextInstanceBindingInfo) { + printInstanceBinding(indexStream, javaNamesToDisplayNames, (PainlessContextInstanceBindingInfo)staticInfo); + } else { + throw new IllegalArgumentException("unexpected static info type"); + } } + } - String classPackageName = classInfo.getName().substring(0, classInfo.getName().lastIndexOf('.')); + if (classInfos.isEmpty() == false) { + indexStream.println(); + indexStream.println("==== Classes By Package"); + indexStream.println("The following classes are available grouped by their respective packages. Click on a class " + + "to view details about the available methods and fields."); + indexStream.println(); - if (classPackageName.equals(currentPackageName) == false) { - currentPackageName = classPackageName; + for (PainlessContextClassInfo classInfo : classInfos) { + String classPackageName = classInfo.getName().substring(0, classInfo.getName().lastIndexOf('.')); - indexStream.println(); - indexStream.println("==== " + currentPackageName); - indexStream.println("<<" + getPackageHeader(contextHeader, currentPackageName) + ", " + - "Expand details for " + currentPackageName + ">>"); - indexStream.println(); - } + if (classPackageName.equals(currentPackageName) == false) { + currentPackageName = classPackageName; - String className = getType(javaNamesToDisplayNames, classInfo.getName()); - indexStream.println("* <<" + getClassHeader(contextHeader, className) + ", " + className + ">>"); + indexStream.println(); + indexStream.println("==== " + currentPackageName); + indexStream.println("<<" + getPackageHeader(contextHeader, currentPackageName) + ", " + + "Expand details for " + currentPackageName + ">>"); + indexStream.println(); + } + + String className = getType(javaNamesToDisplayNames, classInfo.getName()); + indexStream.println("* <<" + getClassHeader(contextHeader, className) + ", " + className + ">>"); + } } indexStream.println(); @@ -289,8 +350,8 @@ private static void printPackages(PrintStream packagesStream, String contextName packagesStream.println(); packagesStream.println("[role=\"exclude\",id=\"" + getPackageHeader(contextHeader, currentPackageName) + "\"]"); packagesStream.println("=== " + contextName + " API for package " + currentPackageName); - packagesStream.println( - "See the <<" + contextHeader + ", " + contextName + " API>> for a high-level overview of all packages."); + packagesStream.println("See the <<" + contextHeader + ", " + contextName + " API>> " + + "for a high-level overview of all packages and classes."); } String className = getType(javaNamesToDisplayNames, classInfo.getName()); @@ -421,6 +482,49 @@ private static void printMethod( stream.println(")"); } + private static void printClassBinding( + PrintStream stream, Map javaNamesToDisplayNames, PainlessContextClassBindingInfo classBindingInfo) { + + stream.print("* " + getType(javaNamesToDisplayNames, classBindingInfo.getRtn()) + " " + classBindingInfo.getName() + "("); + + for (int parameterIndex = 0; parameterIndex < classBindingInfo.getParameters().size(); 
++parameterIndex) {
+            // temporary fix to not print org.elasticsearch.script.ScoreScript parameter until
+            // class instance bindings are created and the information is appropriately added to the context info classes
+            if ("org.elasticsearch.script.ScoreScript".equals(
+                getType(javaNamesToDisplayNames, classBindingInfo.getParameters().get(parameterIndex)))) {
+                continue;
+            }
+
+            stream.print(getType(javaNamesToDisplayNames, classBindingInfo.getParameters().get(parameterIndex)));
+
+            if (parameterIndex < classBindingInfo.getReadOnly()) {
+                stream.print(" *");
+            }
+
+            if (parameterIndex + 1 < classBindingInfo.getParameters().size()) {
+                stream.print(", ");
+            }
+        }
+
+        stream.println(")");
+    }
+
+    private static void printInstanceBinding(
+        PrintStream stream, Map<String, String> javaNamesToDisplayNames, PainlessContextInstanceBindingInfo instanceBindingInfo) {
+
+        stream.print("* " + getType(javaNamesToDisplayNames, instanceBindingInfo.getRtn()) + " " + instanceBindingInfo.getName() + "(");
+
+        for (int parameterIndex = 0; parameterIndex < instanceBindingInfo.getParameters().size(); ++parameterIndex) {
+            stream.print(getType(javaNamesToDisplayNames, instanceBindingInfo.getParameters().get(parameterIndex)));
+
+            if (parameterIndex + 1 < instanceBindingInfo.getParameters().size()) {
+                stream.print(", ");
+            }
+        }
+
+        stream.println(")");
+    }
+
     private static void printField(
         PrintStream stream, Map<String, String> javaNamesToDisplayNames, boolean isStatic, PainlessContextFieldInfo fieldInfo) {
 
@@ -602,15 +706,50 @@ private static String getContextName(PainlessContextInfo contextInfo) {
         return contextNameBuilder.substring(0, contextNameBuilder.length() - 1);
     }
 
+    private static List<Object> sortStaticInfos(Set<Object> staticExcludes, List<Object> staticInfos) {
+        staticInfos = new ArrayList<>(staticInfos);
+        staticInfos.removeIf(staticExcludes::contains);
+
+        staticInfos.sort((si1, si2) -> {
+            String sv1;
+            String sv2;
+
+            if (si1 instanceof PainlessContextMethodInfo) {
+                sv1 = ((PainlessContextMethodInfo)si1).getSortValue();
+            } else if (si1 instanceof PainlessContextClassBindingInfo) {
+                sv1 = ((PainlessContextClassBindingInfo)si1).getSortValue();
+            } else if (si1 instanceof PainlessContextInstanceBindingInfo) {
+                sv1 = ((PainlessContextInstanceBindingInfo)si1).getSortValue();
+            } else {
+                throw new IllegalArgumentException("unexpected static info type");
+            }
+
+            if (si2 instanceof PainlessContextMethodInfo) {
+                sv2 = ((PainlessContextMethodInfo)si2).getSortValue();
+            } else if (si2 instanceof PainlessContextClassBindingInfo) {
+                sv2 = ((PainlessContextClassBindingInfo)si2).getSortValue();
+            } else if (si2 instanceof PainlessContextInstanceBindingInfo) {
+                sv2 = ((PainlessContextInstanceBindingInfo)si2).getSortValue();
+            } else {
+                throw new IllegalArgumentException("unexpected static info type");
+            }
+
+            return sv1.compareTo(sv2);
+        });
+
+        return staticInfos;
+    }
+
     private static List<PainlessContextClassInfo> sortClassInfos(
-            List<PainlessContextClassInfo> classInfos, Set<PainlessContextClassInfo> excludes) {
+            Set<PainlessContextClassInfo> classExcludes, List<PainlessContextClassInfo> classInfos) {
 
+        classInfos = new ArrayList<>(classInfos);
         classInfos.removeIf(v ->
                 "void".equals(v.getName())  || "boolean".equals(v.getName()) || "byte".equals(v.getName())   ||
                 "short".equals(v.getName()) || "char".equals(v.getName())    || "int".equals(v.getName())    ||
                 "long".equals(v.getName())  || "float".equals(v.getName())   || "double".equals(v.getName()) ||
                 "org.elasticsearch.painless.lookup.def".equals(v.getName())  ||
-                isInternalClass(v.getName()) || excludes.contains(v)
+                isInternalClass(v.getName()) || classExcludes.contains(v)
         );
 
         classInfos.sort((c1, c2) -> {
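The sortStaticInfos helper above is essentially a comparator over a heterogeneous list, where each
element type exposes its own sort key via instanceof dispatch. As an illustration of the same idea
in a reduced form: if the info classes shared a common interface, the instanceof chains would
collapse to a single key-extracting comparator (a sketch with stand-in types, not the generator code):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class HeterogeneousSort {
    interface HasSortValue {
        String getSortValue();
    }

    static final class MethodInfo implements HasSortValue {
        private final String sortValue;
        MethodInfo(String sortValue) { this.sortValue = sortValue; }
        public String getSortValue() { return sortValue; }
        public String toString() { return "method:" + sortValue; }
    }

    static final class BindingInfo implements HasSortValue {
        private final String sortValue;
        BindingInfo(String sortValue) { this.sortValue = sortValue; }
        public String getSortValue() { return sortValue; }
        public String toString() { return "binding:" + sortValue; }
    }

    public static void main(String[] args) {
        List<HasSortValue> statics = new ArrayList<>(List.of(new MethodInfo("saturation"), new BindingInfo("decayGeoExp")));
        statics.sort(Comparator.comparing(HasSortValue::getSortValue));
        System.out.println(statics); // [binding:decayGeoExp, method:saturation]
    }
}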
7334945de27ebc6623ffa54994325548b21ffd51 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 8 May 2019 16:00:53 -0400 Subject: [PATCH 025/321] Mute illegal interval rollup tests Awaits fixing: https://github.com/elastic/elasticsearch/issues/41970 --- .../resources/rest-api-spec/test/rollup/rollup_search.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index ca04327eab729..cc5b778223379 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -1246,7 +1246,9 @@ setup: --- "Search error against live index": - + - skip: + version: "all" + reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/41970" - do: catch: bad_request rollup.rollup_search: @@ -1262,7 +1264,9 @@ setup: --- "Search error against rollup and live index": - + - skip: + version: "all" + reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/41970" - do: catch: bad_request rollup.rollup_search: From a3aacc359f98953f444719a50522fc217cbf014a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 8 May 2019 22:03:11 +0200 Subject: [PATCH 026/321] Remove Version 6 pre-release constants (#41517) Now that master is 8.0, we can remove uses of these constants and the backcompat code that uses them, since 8 will always talk to 7.0+ nodes. This PR starts by removing the pre-6.0 release constants, removing obsolete code, and replacing their occurrences in tests where needed. Relates to #41164 --- .../main/java/org/elasticsearch/Version.java | 34 +---------- .../metadata/MetaDataCreateIndexService.java | 10 +--- .../cluster/routing/OperationRouting.java | 12 +--- .../elasticsearch/index/IndexSortConfig.java | 10 ---- .../index/mapper/DynamicTemplate.java | 14 +---- .../index/seqno/ReplicationTracker.java | 19 ++----- .../elasticsearch/index/shard/IndexShard.java | 6 +- .../index/shard/ShardSplittingQuery.java | 4 -- .../index/shard/StoreRecovery.java | 4 +- .../index/translog/Translog.java | 13 +---- .../cluster/IndicesClusterStateService.java | 26 ++------- .../java/org/elasticsearch/VersionTests.java | 1 + .../admin/indices/create/SplitIndexIT.java | 3 +- .../MetaDataCreateIndexServiceTests.java | 9 +-- .../MetaDataIndexUpgradeServiceTests.java | 2 +- .../allocation/FailedNodeRoutingTests.java | 2 +- .../allocation/FailedShardsRoutingTests.java | 6 +- .../index/analysis/AnalysisRegistryTests.java | 2 +- .../index/engine/InternalEngineTests.java | 3 +- .../index/engine/NoOpEngineTests.java | 2 +- .../index/mapper/DynamicTemplateTests.java | 18 +++--- .../query/SimpleQueryStringBuilderTests.java | 4 -- ...ReplicationTrackerRetentionLeaseTests.java | 33 ++++------- .../index/seqno/ReplicationTrackerTests.java | 56 +++++++++---------- .../index/shard/IndexShardTests.java | 8 +-- .../shard/PrimaryReplicaSyncerTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 3 +- .../recovery/StartRecoveryRequestTests.java | 6 +- .../ESIndexLevelReplicationTestCase.java | 6 +- .../index/shard/IndexShardTestCase.java | 10 ++-- .../elasticsearch/test/VersionUtilsTests.java | 22 ++++---- .../engine/FollowEngineIndexShardTests.java | 2 +- .../xpack/core/ml/MlMetadata.java | 2 +- .../core/security/authc/TokenMetaData.java | 2 +- .../ClusterStatsMonitoringDocTests.java | 5 +-
.../IndexRecoveryMonitoringDocTests.java | 2 +- 36 files changed, 114 insertions(+), 251 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 49c5509ca1c18..c2d927f457bd1 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,24 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_0_0_alpha1_ID = 6000001; - public static final Version V_6_0_0_alpha1 = - new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); - public static final int V_6_0_0_alpha2_ID = 6000002; - public static final Version V_6_0_0_alpha2 = - new Version(V_6_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); - public static final int V_6_0_0_beta1_ID = 6000026; - public static final Version V_6_0_0_beta1 = - new Version(V_6_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); - public static final int V_6_0_0_beta2_ID = 6000027; - public static final Version V_6_0_0_beta2 = - new Version(V_6_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); - public static final int V_6_0_0_rc1_ID = 6000051; - public static final Version V_6_0_0_rc1 = - new Version(V_6_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); - public static final int V_6_0_0_rc2_ID = 6000052; - public static final Version V_6_0_0_rc2 = - new Version(V_6_0_0_rc2_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); public static final int V_6_0_0_ID = 6000099; public static final Version V_6_0_0 = new Version(V_6_0_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); @@ -234,18 +216,6 @@ public static Version fromId(int id) { return V_6_0_1; case V_6_0_0_ID: return V_6_0_0; - case V_6_0_0_rc2_ID: - return V_6_0_0_rc2; - case V_6_0_0_beta2_ID: - return V_6_0_0_beta2; - case V_6_0_0_rc1_ID: - return V_6_0_0_rc1; - case V_6_0_0_beta1_ID: - return V_6_0_0_beta1; - case V_6_0_0_alpha2_ID: - return V_6_0_0_alpha2; - case V_6_0_0_alpha1_ID: - return V_6_0_0_alpha1; case V_EMPTY_ID: return V_EMPTY; default: @@ -445,7 +415,7 @@ public Version minimumCompatibilityVersion() { return bwcVersion == null ? 
this : bwcVersion; } - return Version.min(this, fromId((int) major * 1000000 + 0 * 10000 + 99)); + return Version.min(this, fromId(major * 1000000 + 0 * 10000 + 99)); } /** @@ -457,8 +427,6 @@ public Version minimumIndexCompatibilityVersion() { final int bwcMajor; if (major == 5) { bwcMajor = 2; // we jumped from 2 to 5 - } else if (major == 7) { - return V_6_0_0_beta1; } else { bwcMajor = major - 1; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 2e18e5aeae50a..1c9794191bf6e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -20,8 +20,9 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.apache.logging.log4j.Logger; + import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -714,13 +715,6 @@ static void validateSplitIndex(ClusterState state, String sourceIndex, Settings targetIndexSettings) { IndexMetaData sourceMetaData = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings); IndexMetaData.selectSplitShard(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); - if (sourceMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) { - // ensure we have a single type since this would make the splitting code considerably more complex - // and a 5.x index would not be splittable unless it has been shrunk before so rather opt out of the complexity - // since in 5.x we don't have a setting to artificially set the number of routing shards - throw new IllegalStateException("source index created version is too old to apply a split operation"); - } - } static IndexMetaData validateResize(ClusterState state, String sourceIndex, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 08dc3d1d70971..9d0a081af4cbf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -194,16 +193,7 @@ private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable index } } // if not, then use it as the index - int routingHash = Murmur3HashFunction.hash(preference); - if (nodes.getMinNodeVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - // The AllocationService lists shards in a fixed order based on nodes - // so earlier versions of this class would have a tendency to - // select the same node across different shardIds. - // Better overall balancing can be achieved if each shardId opts - // for a different element in the list by also incorporating the - // shard ID into the hash of the user-supplied preference key. 
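For intuition, the retained balancing logic boils down to the following self-contained sketch; Murmur3HashFunction and ShardId are the real types, for which a plain string hash and an int stand in here, so this is an illustration rather than the actual OperationRouting code:

    class PreferenceRoutingSketch {
        // Mixing the shard id into the preference hash gives each shard a
        // different offset into its candidate shard list, so a single
        // user-supplied preference string no longer tends to route every
        // shard of an index through the same node.
        static int routingHash(String preference, int shardIdHash) {
            int preferenceHash = preference.hashCode(); // stand-in for Murmur3HashFunction.hash(preference)
            return 31 * preferenceHash + shardIdHash;
        }
    }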
- routingHash = 31 * routingHash + indexShard.shardId.hashCode(); - } + int routingHash = 31 * Murmur3HashFunction.hash(preference) + indexShard.shardId.hashCode(); if (awarenessAttributes.isEmpty()) { return indexShard.activeInitializingShardsIt(routingHash); } else { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index a7fe19928762f..65e39e34a3e73 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -120,15 +119,6 @@ public IndexSortConfig(IndexSettings indexSettings) { .map((name) -> new FieldSortSpec(name)) .toArray(FieldSortSpec[]::new); - if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1)) { - /** - * This index might be assigned to a node where the index sorting feature is not available - * (ie. versions prior to {@link Version.V_6_0_0_alpha1_UNRELEASED}) so we must fail here rather than later. - */ - throw new IllegalArgumentException("unsupported index.version.created:" + indexSettings.getIndexVersionCreated() + - ", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1); - } - if (INDEX_SORT_ORDER_SETTING.exists(settings)) { List orders = INDEX_SORT_ORDER_SETTING.get(settings); if (orders.size() != sortSpecs.length) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index aafe9f6ba03de..30c9606acd928 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -202,19 +202,7 @@ public static DynamicTemplate parse(String name, Map conf, XContentFieldType xcontentFieldType = null; if (matchMappingType != null && matchMappingType.equals("*") == false) { - try { - xcontentFieldType = XContentFieldType.fromString(matchMappingType); - } catch (IllegalArgumentException e) { - if (indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1)) { - throw e; - } else { - deprecationLogger.deprecated("match_mapping_type [" + matchMappingType + "] is invalid and will be ignored: " - + e.getMessage()); - // this template is on an unknown type so it will never match anything - // null indicates that the template should be ignored - return null; - } - } + xcontentFieldType = XContentFieldType.fromString(matchMappingType); } final MatchType matchType = MatchType.fromString(matchPattern); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 61beddb776c91..437e7934088e7 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.ObjectLongMap; + import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.support.replication.ReplicationResponse; @@ -758,10 +759,9 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the master * @param inSyncAllocationIds the allocation IDs of the currently in-sync shard copies * @param routingTable the shard routing table - * @param pre60AllocationIds the allocation IDs of shards that are allocated to pre-6.0 nodes */ public synchronized void updateFromMaster(final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable, final Set pre60AllocationIds) { + final IndexShardRoutingTable routingTable) { assert invariant(); if (applyingClusterStateVersion > appliedClusterStateVersion) { // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode @@ -782,8 +782,7 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion final boolean inSync = inSyncAllocationIds.contains(initializingId); assert inSync == false : "update from master in primary mode has " + initializingId + " as in-sync but it does not exist locally"; - final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync, inSync)); } @@ -794,8 +793,7 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion } else { for (String initializingId : initializingAllocationIds) { if (shardAllocationId.equals(initializingId) == false) { - final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); } @@ -807,8 +805,7 @@ public synchronized void updateFromMaster(final long applyingClusterStateVersion checkpointState.inSync = true; checkpointState.tracked = true; } else { - final long localCheckpoint = pre60AllocationIds.contains(inSyncId) ? 
- SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); } @@ -1082,17 +1079,13 @@ private Runnable getMasterUpdateOperationFromCurrentState() { assert primaryMode == false; final long lastAppliedClusterStateVersion = appliedClusterStateVersion; final Set inSyncAllocationIds = new HashSet<>(); - final Set pre60AllocationIds = new HashSet<>(); checkpoints.entrySet().forEach(entry -> { if (entry.getValue().inSync) { inSyncAllocationIds.add(entry.getKey()); } - if (entry.getValue().getLocalCheckpoint() == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - pre60AllocationIds.add(entry.getKey()); - } }); final IndexShardRoutingTable lastAppliedRoutingTable = routingTable; - return () -> updateFromMaster(lastAppliedClusterStateVersion, inSyncAllocationIds, lastAppliedRoutingTable, pre60AllocationIds); + return () -> updateFromMaster(lastAppliedClusterStateVersion, inSyncAllocationIds, lastAppliedRoutingTable); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index ee67597efe31a..11e4fb81d9fbe 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import com.carrotsearch.hppc.ObjectLongMap; + import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; @@ -433,8 +434,7 @@ public void updateShardState(final ShardRouting newRouting, final BiConsumer> primaryReplicaSyncer, final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable, - final Set pre60AllocationIds) throws IOException { + final IndexShardRoutingTable routingTable) throws IOException { final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; @@ -453,7 +453,7 @@ public void updateShardState(final ShardRouting newRouting, } if (newRouting.primary()) { - replicationTracker.updateFromMaster(applyingClusterStateVersion, inSyncAllocationIds, routingTable, pre60AllocationIds); + replicationTracker.updateFromMaster(applyingClusterStateVersion, inSyncAllocationIds, routingTable); } if (state == IndexShardState.POST_RECOVERY && newRouting.active()) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index fb33cceaa49d8..dae910e5fe3c4 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -65,10 +65,6 @@ final class ShardSplittingQuery extends Query { private final BitSetProducer nestedParentBitSetProducer; ShardSplittingQuery(IndexMetaData indexMetaData, int shardId, boolean hasNested) { - if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_rc2)) { - throw new IllegalArgumentException("Splitting query can only be executed on an index created with version " - + Version.V_6_0_0_rc2 + " or higher"); - } this.indexMetaData = indexMetaData; this.shardId = shardId; this.nestedParentBitSetProducer = hasNested ? 
newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null; diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index c97c19eb0f3ec..0e87b9e2357e5 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -31,7 +32,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.RecoverySource; @@ -119,8 +119,6 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate Sort indexSort = indexShard.getIndexSort(); final boolean hasNested = indexShard.mapperService().hasNested(); final boolean isSplit = sourceMetaData.getNumberOfShards() < indexShard.indexSettings().getNumberOfShards(); - assert isSplit == false || sourceMetaData.getCreationVersion().onOrAfter(Version.V_6_0_0_alpha1) : "for split we require a " + - "single type but the index is created before 6.0.0"; return executeRecovery(indexShard, () -> { logger.debug("starting recovery from local shards {}", shards); try { diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 841201b321549..9a1e657199938 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.Version; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -216,15 +215,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws try (ReleasableLock lock = writeLock.acquire()) { logger.debug("open uncommitted translog checkpoint {}", checkpoint); - final long minGenerationToRecoverFrom; - if (checkpoint.minTranslogGeneration < 0) { - final Version indexVersionCreated = indexSettings().getIndexVersionCreated(); - assert indexVersionCreated.before(Version.V_6_0_0_beta1) : - "no minTranslogGeneration in checkpoint, but index was created with version [" + indexVersionCreated + "]"; - minGenerationToRecoverFrom = deletionPolicy.getMinTranslogGenerationForRecovery(); - } else { - minGenerationToRecoverFrom = checkpoint.minTranslogGeneration; - } + final long minGenerationToRecoverFrom = checkpoint.minTranslogGeneration; + assert minGenerationToRecoverFrom >= 0 : "minTranslogGeneration should be non-negative"; final String checkpointTranslogFile = getFilename(checkpoint.generation); // we open files in reverse order to validate translog uuid before we start traversing the translog based on @@ -882,6 +874,7 @@ public Location(long generation, long translogLocation, int size) { this.size = size; } + @Override public String toString() { return "[generation: " +
generation + ", location: " + translogLocation + ", size: " + size + "]"; } diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 821e095fc20b0..1ce39e283b1da 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -35,7 +34,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource.Type; import org.elasticsearch.cluster.routing.RoutingNode; @@ -94,8 +92,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED; import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; @@ -149,7 +145,7 @@ public IndicesClusterStateService( final RetentionLeaseBackgroundSyncAction retentionLeaseBackgroundSyncAction) { this( settings, - (AllocatedIndices>) indicesService, + indicesService, clusterService, threadPool, recoveryTargetService, @@ -630,21 +626,8 @@ private void updateShard(DiscoveryNodes nodes, ShardRouting shardRouting, Shard primaryTerm = indexMetaData.primaryTerm(shard.shardId().id()); final Set inSyncIds = indexMetaData.inSyncAllocationIds(shard.shardId().id()); final IndexShardRoutingTable indexShardRoutingTable = routingTable.shardRoutingTable(shardRouting.shardId()); - final Set pre60AllocationIds = indexShardRoutingTable.assignedShards() - .stream() - .flatMap(shr -> { - if (shr.relocating()) { - return Stream.of(shr, shr.getTargetRelocatingShard()); - } else { - return Stream.of(shr); - } - }) - .filter(shr -> nodes.get(shr.currentNodeId()).getVersion().before(Version.V_6_0_0_alpha1)) - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .collect(Collectors.toSet()); shard.updateShardState(shardRouting, primaryTerm, primaryReplicaSyncer::resync, clusterState.version(), - inSyncIds, indexShardRoutingTable, pre60AllocationIds); + inSyncIds, indexShardRoutingTable); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); return; @@ -810,7 +793,7 @@ public interface Shard { * - Updates and persists the new routing value. * - Updates the primary term if this shard is a primary. * - Updates the allocation ids that are tracked by the shard if it is a primary. - * See {@link ReplicationTracker#updateFromMaster(long, Set, IndexShardRoutingTable, Set)} for details. + * See {@link ReplicationTracker#updateFromMaster(long, Set, IndexShardRoutingTable)} for details. 
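The bookkeeping consequence of this change can be reduced to a small self-contained sketch; the -2 literal mirrors SequenceNumbers.UNASSIGNED_SEQ_NO and the plain map stands in for the tracker's internal checkpoints map, so this is an illustration, not the ReplicationTracker code itself:

    import java.util.HashMap;
    import java.util.Map;

    class CheckpointInitSketch {
        static final long UNASSIGNED_SEQ_NO = -2L; // value of SequenceNumbers.UNASSIGNED_SEQ_NO

        // With the pre-6.0 branch removed, every newly tracked allocation id
        // starts from the same unassigned checkpoint; there is no longer a
        // per-id PRE_60_NODE_CHECKPOINT special case.
        static void track(Map<String, Long> checkpoints, String allocationId) {
            checkpoints.put(allocationId, UNASSIGNED_SEQ_NO);
        }

        public static void main(String[] args) {
            Map<String, Long> checkpoints = new HashMap<>();
            track(checkpoints, "some-allocation-id");
            System.out.println(checkpoints); // prints {some-allocation-id=-2}
        }
    }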
* * @param shardRouting the new routing entry * @param primaryTerm the new primary term @@ -826,8 +809,7 @@ void updateShardState(ShardRouting shardRouting, BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable, - Set pre60AllocationIds) throws IOException; + IndexShardRoutingTable routingTable) throws IOException; } public interface AllocatedIndex extends Iterable, IndexComponent { diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index c04f131eab063..21a18e4a26ba5 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -102,6 +102,7 @@ public void testMax() { public void testMinimumIndexCompatibilityVersion() { assertEquals(Version.fromId(5000099), Version.fromId(6000099).minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), Version.fromId(5000099).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), Version.fromId(5010000).minimumIndexCompatibilityVersion()); assertEquals(Version.fromId(2000099), diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 7038505ff6fb3..05d1c5dcd803f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -367,7 +367,8 @@ private static IndexMetaData indexMetaData(final Client client, final String ind public void testCreateSplitIndex() { internalCluster().ensureAtLeastNumDataNodes(2); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_rc2, Version.CURRENT); + + Version version = VersionUtils.randomIndexCompatibleVersion(random()); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) .put("number_of_shards", 1) .put("index.version.created", version) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index e3ba62b3b79ab..f83d0aa783c24 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -100,13 +100,6 @@ public static boolean isSplitable(int source, int target) { } public void testNumberOfShards() { - { - final Version versionCreated = VersionUtils.randomVersionBetween( - random(), - Version.V_6_0_0_alpha1, VersionUtils.getPreviousVersion(Version.V_7_0_0)); - final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); - assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(5)); - } { final Version versionCreated = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); @@ -466,7 +459,7 @@ public void testCalculateNumRoutingShards() { double ratio = numRoutingShards / randomNumShards; int intRatio = (int) ratio; - assertEquals(ratio, (double)(intRatio), 0.0d); + assertEquals(ratio, intRatio, 0.0d); assertTrue(1 < ratio); assertTrue(ratio 
<= 1024); assertEquals(0, intRatio % 2); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index 4c1ba0ff77e34..3724f47537428 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -165,7 +165,7 @@ public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, "BOOM") - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0_alpha1) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT.minimumIndexCompatibilityVersion()) .put(indexSettings) .build(); return IndexMetaData.builder(name).settings(build).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 71a31b320591a..2e56ae6297b89 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -230,7 +230,7 @@ protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { } final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, - VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, null)); + VersionUtils.randomIndexCompatibleVersion(random())); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index c2d6a67468f3f..889be132d45d9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -633,8 +633,10 @@ public void testReplicaOnNewestVersionIsPromoted() { clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()) - .add(newNode("node3-6.x", VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, null))) - .add(newNode("node4-6.x", VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, null)))) + .add(newNode("node3-old", + VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumIndexCompatibilityVersion(), null))) + .add(newNode("node4-old", + VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumIndexCompatibilityVersion(), null)))) .build(); // start all the replicas diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index b836a5d0372b8..d75b359863137 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -129,7 +129,7 @@ public TokenStream create(TokenStream tokenStream) { } public void testOverrideDefaultIndexAnalyzerIsUnsupported() { - Version version = 
VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); AnalyzerProvider defaultIndex = new PreBuiltAnalyzerProvider("default_index", AnalyzerScope.INDEX, new EnglishAnalyzer()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index ae11500e54e5e..82c4035cfa7db 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -2290,7 +2291,7 @@ public void testSeqNoAndCheckpoints() throws IOException { ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier(); gcpTracker.updateFromMaster(1L, new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())), - new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build()); gcpTracker.activatePrimaryMode(primarySeqNo); for (int op = 0; op < opCount; op++) { final String id; diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index 3a857a2046842..f03500e6e1250 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -75,7 +75,7 @@ public void testNoopAfterRegularEngine() throws IOException { ShardRouting routing = TestShardRouting.newShardRouting("test", shardId.id(), "node", null, true, ShardRoutingState.STARTED, allocationId); IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build(); - tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table, Collections.emptySet()); + tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table); tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); for (int i = 0; i < docs; i++) { ParsedDocument doc = testParsedDocument("" + i, null, testDocumentWithTextField(), B_1, null); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java index a910c2c86bab8..5604f4240ce53 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java @@ -40,7 +40,7 @@ public void testParseUnknownParam() throws Exception { templateDef.put("random_param", "random_value"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1)); + () -> DynamicTemplate.parse("my_template", templateDef, 
Version.CURRENT.minimumIndexCompatibilityVersion())); assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage()); } @@ -50,7 +50,7 @@ public void testParseUnknownMatchType() { templateDef2.put("mapping", Collections.singletonMap("store", true)); // if a wrong match type is specified, we ignore the template IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef2, Version.V_6_0_0_alpha1)); + () -> DynamicTemplate.parse("my_template", templateDef2, Version.CURRENT.minimumIndexCompatibilityVersion())); assertEquals("No field type matched on [text], possible values are [object, string, long, double, boolean, date, binary]", e.getMessage()); } @@ -63,7 +63,7 @@ public void testParseInvalidRegex() { templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.V_6_3_0)); + () -> DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion())); assertEquals("Pattern [*a] of type [regex] is invalid. Cannot create dynamic template [my_template].", e.getMessage()); } } @@ -72,7 +72,7 @@ public void testMatchAllTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "*"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); assertTrue(template.match("a.b", "b", randomFrom(XContentFieldType.values()))); } @@ -80,7 +80,7 @@ public void testMatchTypeTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); assertTrue(template.match("a.b", "b", XContentFieldType.STRING)); assertFalse(template.match("a.b", "b", XContentFieldType.BOOLEAN)); } @@ -90,7 +90,7 @@ public void testSerialization() throws Exception { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); XContentBuilder builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -100,7 +100,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "*name"); templateDef.put("unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); builder = JsonXContent.contentBuilder(); 
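The template definitions these assertions exercise are plain nested maps; the following self-contained sketch mirrors the test inputs above and illustrates only the input shape, not the mapper internals:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class TemplateDefSketch {
        // Same shape the tests feed to DynamicTemplate.parse: match rules
        // plus the mapping to apply when a field matches.
        static Map<String, Object> pathTemplate() {
            Map<String, Object> templateDef = new HashMap<>();
            templateDef.put("path_match", "*name");
            templateDef.put("path_unmatch", "first_name");
            templateDef.put("mapping", Collections.singletonMap("store", true));
            return templateDef;
        }
    }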
template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -110,7 +110,7 @@ public void testSerialization() throws Exception { templateDef.put("path_match", "*name"); templateDef.put("path_unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", @@ -121,7 +121,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "^a$"); templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.V_6_0_0_alpha1); + template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index daed696f02fd9..0adac9db8287e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -39,7 +39,6 @@ import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.TestUtil; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.search.SimpleQueryStringQueryParser; @@ -239,9 +238,6 @@ public void testFieldsCannotBeSetToNull() { } public void testDefaultFieldParsing() throws IOException { - assumeTrue("5.x behaves differently, so skip on non-6.x indices", - indexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)); - String query = randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ROOT); String contentString = "{\n" + " \"simple_query_string\" : {\n" + diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 178df2eac899e..d1bd5712dbadc 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -70,8 +70,7 @@ public void testAddOrRenewRetentionLease() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); final long[] 
minimumRetainingSequenceNumbers = new long[length]; @@ -112,8 +111,7 @@ public void testAddDuplicateRetentionLease() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final String id = randomAlphaOfLength(8); final long retainingSequenceNumber = randomNonNegativeLong(); @@ -141,8 +139,7 @@ public void testRenewNotFoundRetentionLease() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final String id = randomAlphaOfLength(8); final RetentionLeaseNotFoundException e = expectThrows( @@ -178,8 +175,7 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); @@ -213,8 +209,7 @@ public void testRemoveRetentionLease() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); final long[] minimumRetainingSequenceNumbers = new long[length]; @@ -264,8 +259,7 @@ public void testRemoveNotFound() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final String id = randomAlphaOfLength(8); final RetentionLeaseNotFoundException e = expectThrows( @@ -301,8 +295,7 @@ public void testRemoveRetentionLeaseCausesRetentionLeaseSync() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); @@ -353,8 +346,7 @@ private void runExpirationTest(final boolean primaryMode) { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); if (primaryMode) { replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); } @@ -426,8 +418,7 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); 
+ routingTable(Collections.emptySet(), allocationId)); final int length = randomIntBetween(0, 8); final List retentionLeasesCollection = new ArrayList<>(length); long primaryTerm = 1; @@ -480,8 +471,7 @@ public void testLoadAndPersistRetentionLeases() throws IOException { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); for (int i = 0; i < length; i++) { @@ -520,8 +510,7 @@ public void testPersistRetentionLeasesUnderConcurrency() throws IOException { replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); for (int i = 0; i < length; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java index 037d2130b5c7b..05ca0a5ea3006 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java @@ -64,7 +64,7 @@ import static org.hamcrest.Matchers.not; public class ReplicationTrackerTests extends ReplicationTrackerTestCase { - + public void testEmptyShards() { final ReplicationTracker tracker = newTracker(AllocationId.newInitializing()); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -86,7 +86,7 @@ private void updateLocalCheckpoint(final ReplicationTracker tracker, final Strin tracker.updateLocalCheckpoint(allocationId, localCheckpoint); assertThat(updatedGlobalCheckpoint.get(), equalTo(tracker.getGlobalCheckpoint())); } - + public void testGlobalCheckpointUpdate() { final long initialClusterStateVersion = randomNonNegativeLong(); Map allocations = new HashMap<>(); @@ -120,7 +120,7 @@ public void testGlobalCheckpointUpdate() { logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); }); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1)); initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); @@ -147,7 +147,7 @@ public void testGlobalCheckpointUpdate() { Set newInitializing = new HashSet<>(initializing); newInitializing.add(extraId); - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId)); tracker.initiateTracking(extraId.getId()); @@ -187,7 +187,7 @@ public void testMarkAllocationIdAsInSync() throws BrokenBarrierException, Interr final AllocationId primaryId = active.iterator().next(); final AllocationId replicaId = initializing.iterator().next(); final ReplicationTracker 
tracker = newTracker(primaryId); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); tracker.activatePrimaryMode(localCheckpoint); tracker.initiateTracking(replicaId.getId()); @@ -229,7 +229,7 @@ public void testMissingActiveIdsPreventAdvance() { assigned.putAll(initializing); AllocationId primaryId = active.keySet().iterator().next(); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(initializing.keySet()).forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); final AllocationId missingActiveID = randomFrom(active.keySet()); @@ -256,7 +256,7 @@ public void testMissingInSyncIdsPreventAdvance() { AllocationId primaryId = active.keySet().iterator().next(); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); @@ -278,7 +278,7 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); final AllocationId primaryId = active.keySet().iterator().next(); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); initializing.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); nonApproved.keySet().forEach(k -> @@ -313,7 +313,7 @@ public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { allocations.putAll(initializingToBeRemoved); } final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); @@ -329,16 +329,14 @@ public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { tracker.updateFromMaster( initialClusterStateVersion + 1, ids(activeToStay.keySet()), - routingTable(initializingToStay.keySet(), primaryId), - emptySet()); + routingTable(initializingToStay.keySet(), primaryId)); allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); } else { allocations.forEach((aid, ckp) -> 
updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); tracker.updateFromMaster( initialClusterStateVersion + 2, ids(activeToStay.keySet()), - routingTable(initializingToStay.keySet(), primaryId), - emptySet()); + routingTable(initializingToStay.keySet(), primaryId)); } final long checkpoint = Stream.concat(activeToStay.values().stream(), initializingToStay.values().stream()) @@ -357,7 +355,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { final ReplicationTracker tracker = newTracker(inSyncAllocationId); final long clusterStateVersion = randomNonNegativeLong(); tracker.updateFromMaster(clusterStateVersion, Collections.singleton(inSyncAllocationId.getId()), - routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); + routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId)); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -397,7 +395,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { } else { // master changes its mind and cancels the allocation tracker.updateFromMaster(clusterStateVersion + 1, Collections.singleton(inSyncAllocationId.getId()), - routingTable(emptySet(), inSyncAllocationId), emptySet()); + routingTable(emptySet(), inSyncAllocationId)); barrier.await(); assertTrue(complete.get()); assertNull(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId())); @@ -405,7 +403,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { assertFalse(tracker.pendingInSync.contains(trackingAllocationId.getId())); thread.join(); } - + private AtomicLong updatedGlobalCheckpoint = new AtomicLong(UNASSIGNED_SEQ_NO); private ReplicationTracker newTracker(final AllocationId allocationId) { @@ -421,7 +419,7 @@ public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBar final AllocationId trackingAllocationId = AllocationId.newInitializing(); final ReplicationTracker tracker = newTracker(inSyncAllocationId); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), - routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); + routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId)); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -470,7 +468,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { AllocationId primaryId = activeAllocationIds.iterator().next(); IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable, emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); @@ -500,7 +498,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { final Set newInitializingAllocationIds = initializingIds.stream().filter(a -> !removingInitializingAllocationIds.contains(a)).collect(Collectors.toSet()); routingTable = routingTable(newInitializingAllocationIds, primaryId); - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable, emptySet()); 
+ tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue(removingActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); @@ -517,8 +515,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { tracker.updateFromMaster( initialClusterStateVersion + 2, ids(newActiveAllocationIds), - routingTable(newInitializingAllocationIds, primaryId), - emptySet()); + routingTable(newInitializingAllocationIds, primaryId)); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue( newActiveAllocationIds @@ -550,10 +547,10 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { .entrySet() .stream() .allMatch(e -> tracker.getTrackedLocalCheckpointForShard(e.getKey().getId()).getLocalCheckpoint() == e.getValue())); - final long minimumActiveLocalCheckpoint = (long) activeLocalCheckpoints.values().stream().min(Integer::compareTo).get(); + final long minimumActiveLocalCheckpoint = activeLocalCheckpoints.values().stream().min(Integer::compareTo).get(); assertThat(tracker.getGlobalCheckpoint(), equalTo(minimumActiveLocalCheckpoint)); assertThat(updatedGlobalCheckpoint.get(), equalTo(minimumActiveLocalCheckpoint)); - final long minimumInitailizingLocalCheckpoint = (long) initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get(); + final long minimumInitailizingLocalCheckpoint = initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get(); // now we are going to add a new allocation ID and bring it in sync which should move it to the in-sync allocation IDs final long localCheckpoint = @@ -565,8 +562,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { tracker.updateFromMaster( initialClusterStateVersion + 3, ids(newActiveAllocationIds), - routingTable(newInitializingAllocationIds, primaryId), - emptySet()); + routingTable(newInitializingAllocationIds, primaryId)); final CyclicBarrier barrier = new CyclicBarrier(2); final Thread thread = new Thread(() -> { try { @@ -604,8 +600,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { tracker.updateFromMaster( initialClusterStateVersion + 4, ids(newActiveAllocationIds), - routingTable(newInitializingAllocationIds, primaryId), - emptySet()); + routingTable(newInitializingAllocationIds, primaryId)); assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); } @@ -633,8 +628,7 @@ public void testRaceUpdatingGlobalCheckpoint() throws InterruptedException, Brok tracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(active.getId()), - routingTable(Collections.singleton(initializing), active), - emptySet()); + routingTable(Collections.singleton(initializing), active)); tracker.activatePrimaryMode(activeLocalCheckpoint); final int nextActiveLocalCheckpoint = randomIntBetween(activeLocalCheckpoint + 1, Integer.MAX_VALUE); final Thread activeThread = new Thread(() -> { @@ -835,7 +829,7 @@ public void testIllegalStateExceptionIfUnknownAllocationId() { final AllocationId initializing = AllocationId.newInitializing(); final 
ReplicationTracker tracker = newTracker(active); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(active.getId()), - routingTable(Collections.singleton(initializing), active), emptySet()); + routingTable(Collections.singleton(initializing), active)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(randomAlphaOfLength(10))); @@ -863,7 +857,7 @@ public Set initializingIds() { } public void apply(ReplicationTracker gcp) { - gcp.updateFromMaster(version, ids(inSyncIds), routingTable, Collections.emptySet()); + gcp.updateFromMaster(version, ids(inSyncIds), routingTable); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b34f364bbed2c..0be7b4433fac3 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -603,7 +603,7 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { replicaRouting.allocationId()); indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build()); /* * This operation completing means that the delay operation executed as part of increasing the primary term has completed and the @@ -654,8 +654,7 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { latch.countDown(); }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), - new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), - Collections.emptySet()); + new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build()); latch.await(); assertThat(indexShard.getActiveOperationsCount(), isOneOf(0, IndexShard.OPERATIONS_BLOCKED)); if (randomBoolean()) { @@ -1140,8 +1139,7 @@ public void onFailure(Exception e) { (s, r) -> resyncLatch.countDown(), 1L, Collections.singleton(newRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build(), - Collections.emptySet()); + new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build()); resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index c7d59fdb7c25e..e0825445bb8c2 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -99,7 +99,7 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { String allocationId = shard.routingEntry().allocationId().getId(); shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet()); + new 
IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build()); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); assertEquals(globalCheckPoint, shard.getGlobalCheckpoint()); @@ -159,7 +159,7 @@ public void testSyncerOnClosingShard() throws Exception { String allocationId = shard.routingEntry().allocationId().getId(); shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build()); CountDownLatch syncCalledLatch = new CountDownLatch(1); PlainActionFuture fut = new PlainActionFuture() { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index e8b52fb3bbd2c..80cf443e5007e 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -355,8 +355,7 @@ public void updateShardState(ShardRouting shardRouting, BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable, - Set pre60AllocationIds) throws IOException { + IndexShardRoutingTable routingTable) throws IOException { failRandomly(); assertThat(this.shardId(), equalTo(shardRouting.shardId())); assertTrue("current: " + this.shardRouting + ", got: " + shardRouting, this.shardRouting.isSameAllocation(shardRouting)); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index bb1aac89f3e8f..e77bf5f8d4ae5 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -75,11 +75,7 @@ public void testSerialization() throws Exception { assertThat(outRequest.metadataSnapshot().asMap(), equalTo(inRequest.metadataSnapshot().asMap())); assertThat(outRequest.isPrimaryRelocation(), equalTo(inRequest.isPrimaryRelocation())); assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId())); - if (targetNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) { - assertThat(outRequest.startingSeqNo(), equalTo(inRequest.startingSeqNo())); - } else { - assertThat(SequenceNumbers.UNASSIGNED_SEQ_NO, equalTo(inRequest.startingSeqNo())); - } + assertThat(outRequest.startingSeqNo(), equalTo(inRequest.startingSeqNo())); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 0063f2a6d9b0c..b11a0f84fb84a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -293,7 +293,7 @@ public void startPrimary() throws IOException { ShardRouting startedRoutingEntry = ShardRoutingHelper.moveToStarted(primary.routingEntry()); IndexShardRoutingTable routingTable = 
routingTable(shr -> shr == primary.routingEntry() ? startedRoutingEntry : shr); primary.updateShardState(startedRoutingEntry, primary.getPendingPrimaryTerm(), null, - currentClusterStateVersion.incrementAndGet(), activeIds, routingTable, Collections.emptySet()); + currentClusterStateVersion.incrementAndGet(), activeIds, routingTable); for (final IndexShard replica : replicas) { recoverReplica(replica); } @@ -385,7 +385,7 @@ public synchronized void promoteReplicaToPrimary(IndexShard replica, IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? primaryRouting : shr); primary.updateShardState(primaryRouting, newTerm, primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(), - activeIds(), routingTable, Collections.emptySet()); + activeIds(), routingTable); } private synchronized Set activeIds() { @@ -520,7 +520,7 @@ private void updateAllocationIDsOnPrimary() throws IOException { primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), - activeIds(), routingTable(Function.identity()), Collections.emptySet()); + activeIds(), routingTable(Function.identity())); } private synchronized void computeReplicationTargets() { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 105ec5415d686..6175a22760029 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -548,7 +548,7 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin .addShard(shardRouting) .build(); shard.updateShardState(shardRouting, shard.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), - inSyncIds, newRoutingTable, Collections.emptySet()); + inSyncIds, newRoutingTable); } protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException { @@ -633,7 +633,7 @@ protected final void recoverUnstartedReplica(final IndexShard replica, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), request, Math.toIntExact(ByteSizeUnit.MB.toBytes(1)), between(1, 8)); primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, - currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet()); + currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable); PlainActionFuture future = new PlainActionFuture<>(); recovery.recoverToTarget(future); @@ -658,9 +658,9 @@ protected void startReplicaAfterRecovery(IndexShard replica, IndexShard primary, inSyncIdsWithReplica.add(replica.routingEntry().allocationId().getId()); // update both primary and replica shard state primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, - currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet()); + currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable); replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPendingPrimaryTerm(), null, - currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet()); + currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable); } @@ -685,7 +685,7 @@ protected void promoteReplica(IndexShard replica, Set inSyncIds, IndexSh 
(is, listener) -> listener.onResponse(new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())), currentClusterStateVersion.incrementAndGet(), - inSyncIds, newRoutingTable, Collections.emptySet()); + inSyncIds, newRoutingTable); } private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException { diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 2395fcc2484e9..0143dad55b0e0 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -31,6 +31,7 @@ import static java.util.stream.Collectors.toCollection; import static java.util.stream.Collectors.toList; +import static org.elasticsearch.Version.fromId; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -62,22 +63,21 @@ public void testRandomVersionBetween() { assertTrue(got.onOrBefore(Version.CURRENT)); // sub range - got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, - Version.V_6_2_4); - assertTrue(got.onOrAfter(Version.V_6_0_0_alpha1)); - assertTrue(got.onOrBefore(Version.V_6_2_4)); + got = VersionUtils.randomVersionBetween(random(), fromId(7000099), fromId(7010099)); + assertTrue(got.onOrAfter(fromId(7000099))); + assertTrue(got.onOrBefore(fromId(7010099))); // unbounded lower - got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_beta1); + got = VersionUtils.randomVersionBetween(random(), null, fromId(7000099)); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); - assertTrue(got.onOrBefore(Version.V_6_0_0_beta1)); + assertTrue(got.onOrBefore(fromId(7000099))); got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0)); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0))); // unbounded upper - got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, null); - assertTrue(got.onOrAfter(Version.V_6_0_0)); + got = VersionUtils.randomVersionBetween(random(), fromId(7000099), null); + assertTrue(got.onOrAfter(fromId(7000099))); assertTrue(got.onOrBefore(Version.CURRENT)); got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null); assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion())); @@ -88,9 +88,9 @@ public void testRandomVersionBetween() { assertEquals(got, VersionUtils.getFirstVersion()); got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT); assertEquals(got, Version.CURRENT); - got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_beta1, - Version.V_6_0_0_beta1); - assertEquals(got, Version.V_6_0_0_beta1); + got = VersionUtils.randomVersionBetween(random(), fromId(7000099), + fromId(7000099)); + assertEquals(got, fromId(7000099)); // implicit range of one got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index 1326f0ebc79bb..947ce78da2ca3 100644 --- 
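
Aside on the fromId(7000099) calls that replace the removed version constants in the VersionUtilsTests hunk above: they lean on Version's numeric id layout. The helper below is a minimal sketch of that layout as I understand it (the gaId method and its build-99 convention are my reading of the scheme, not part of this patch; Version.fromId itself is real, as the added static import shows):

    import org.elasticsearch.Version;

    final class VersionIdSketch {
        // Assumed encoding: id = major * 1_000_000 + minor * 10_000 + revision * 100 + build,
        // where build 99 appears to mark a GA release (alpha/beta releases use smaller build values).
        static int gaId(int major, int minor, int revision) {
            return major * 1_000_000 + minor * 10_000 + revision * 100 + 99;
        }

        public static void main(String[] args) {
            // 7000099 decodes to 7.0.0 and 7010099 to 7.1.0 -- the bounds used in the test above.
            System.out.println(gaId(7, 0, 0) + " -> " + Version.fromId(gaId(7, 0, 0)));
            System.out.println(gaId(7, 1, 0) + " -> " + Version.fromId(gaId(7, 1, 0)));
        }
    }
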
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -80,7 +80,7 @@ public void testDoNotFillGaps() throws Exception { replicaRouting.allocationId()); indexShard.updateShardState(primaryRouting, indexShard.getOperationPrimaryTerm() + 1, (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build()); final CountDownLatch latch = new CountDownLatch(1); ActionListener actionListener = ActionListener.wrap(releasable -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 2ad999d82ade0..0f502577195dd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -106,7 +106,7 @@ public boolean isUpgradeMode() { @Override public Version getMinimalSupportedVersion() { - return Version.V_6_0_0_alpha1; + return Version.CURRENT.minimumIndexCompatibilityVersion(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java index 46111b9b16cd1..8d9b9c762e35f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java @@ -93,7 +93,7 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return Version.V_6_0_0_beta2; + return Version.CURRENT.minimumIndexCompatibilityVersion(); } @Override diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index e03f07740095a..94b205b3d85d1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -233,7 +233,8 @@ public void testToXContent() throws IOException { final List usages = singletonList(new MonitoringFeatureSetUsage(false, true, false, null)); final NodeInfo mockNodeInfo = mock(NodeInfo.class); - when(mockNodeInfo.getVersion()).thenReturn(Version.V_6_0_0_alpha2); + Version mockNodeVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); + when(mockNodeInfo.getVersion()).thenReturn(mockNodeVersion); when(mockNodeInfo.getNode()).thenReturn(discoveryNode); final TransportInfo mockTransportInfo = mock(TransportInfo.class); @@ -446,7 +447,7 @@ public void testToXContent() throws IOException { + "\"ingest\":0" + "}," + "\"versions\":[" - + "\"6.0.0-alpha2\"" + + "\"" + mockNodeVersion + "\"" + "]," + "\"os\":{" + "\"available_processors\":32," diff --git 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java index 49fdd9ad244ac..08e04725c2e0b 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java @@ -89,7 +89,7 @@ public void testToXContent() throws IOException { new TransportAddress(TransportAddress.META_ADDRESS, 9301), singletonMap("attr", "value_1"), singleton(DiscoveryNode.Role.DATA), - Version.V_6_0_0_alpha1); + Version.CURRENT.minimumIndexCompatibilityVersion()); final ShardId shardId = new ShardId("_index_a", "_uuid_a", 0); final RecoverySource source = RecoverySource.PeerRecoverySource.INSTANCE; From 14e384825190a976a5a13804d170b66d7b87e640 Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Wed, 8 May 2019 15:09:02 -0600 Subject: [PATCH 027/321] Add note about ILM action ordering (#41771) Adds a note clarifying that actions are ordered automatically. --- docs/reference/ilm/policy-definitions.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 945f80babad1b..00578ce8c050f 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -84,6 +84,10 @@ executing. The below list shows the actions which are available in each phase. +NOTE: The order in which configured actions are performed within each phase is +determined automatically by {ilm-init}, and cannot be changed by changing the +policy definition.
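
To make the note above concrete: the order in which actions are listed in a policy has no effect on when they run within a phase. A short sketch, assuming the 7.x high-level REST client's indexlifecycle API (the class names and constructors below are my recollection of that client, not part of this patch):

    import java.io.IOException;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.indexlifecycle.LifecycleAction;
    import org.elasticsearch.client.indexlifecycle.LifecyclePolicy;
    import org.elasticsearch.client.indexlifecycle.Phase;
    import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest;
    import org.elasticsearch.client.indexlifecycle.RolloverAction;
    import org.elasticsearch.client.indexlifecycle.SetPriorityAction;
    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;
    import org.elasticsearch.common.unit.TimeValue;

    final class IlmOrderingSketch {
        // Builds a hot phase whose actions are inserted in "reverse" order on purpose;
        // ILM still runs the phase's actions in its own fixed order (set_priority before
        // rollover in 7.x, if I recall correctly), exactly as the documentation note states.
        static void putPolicy(RestHighLevelClient client) throws IOException {
            Map<String, LifecycleAction> hotActions = new HashMap<>();
            hotActions.put(RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null));
            hotActions.put(SetPriorityAction.NAME, new SetPriorityAction(100));
            Phase hot = new Phase("hot", TimeValue.ZERO, hotActions);
            LifecyclePolicy policy = new LifecyclePolicy("my-policy", Collections.singletonMap("hot", hot));
            client.indexLifecycle().putLifecyclePolicy(new PutLifecyclePolicyRequest(policy), RequestOptions.DEFAULT);
        }
    }
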
+ * Hot - <> - <> From 85226d6c7d3b8ccbfa19e39179e7c63e1a26c134 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 8 May 2019 16:18:10 -0500 Subject: [PATCH 028/321] [ML] relax set upgrade mode test to match what is guaranteed (#41958) * [ML] relax set upgrade mode test to match what is guaranteed * removing unused import --- .../xpack/ml/integration/SetUpgradeModeIT.java | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java index 57c9245e2c5b3..f97c27e4ccca4 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.getDataCounts; @@ -33,7 +34,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.isEmptyString; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -88,12 +88,12 @@ public void testEnableUpgradeMode() throws Exception { GetJobsStatsAction.Response.JobStats jobStats = getJobStats(jobId).get(0); assertThat(jobStats.getState(), equalTo(JobState.OPENED)); - assertThat(jobStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(jobStats.getAssignmentExplanation(), equalTo(AWAITING_UPGRADE.getExplanation())); assertThat(jobStats.getNode(), is(nullValue())); GetDatafeedsStatsAction.Response.DatafeedStats datafeedStats = getDatafeedStats(datafeedId); assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); - assertThat(datafeedStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(datafeedStats.getAssignmentExplanation(), equalTo(AWAITING_UPGRADE.getExplanation())); assertThat(datafeedStats.getNode(), is(nullValue())); Job.Builder job = createScheduledJob("job-should-not-open"); @@ -126,13 +126,11 @@ public void testEnableUpgradeMode() throws Exception { jobStats = getJobStats(jobId).get(0); assertThat(jobStats.getState(), equalTo(JobState.OPENED)); - assertThat(jobStats.getAssignmentExplanation(), isEmptyString()); - assertThat(jobStats.getNode(), is(not(nullValue()))); + assertThat(jobStats.getAssignmentExplanation(), not(equalTo(AWAITING_UPGRADE.getExplanation()))); datafeedStats = getDatafeedStats(datafeedId); assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); - assertThat(datafeedStats.getAssignmentExplanation(), isEmptyString()); - assertThat(datafeedStats.getNode(), is(not(nullValue()))); + assertThat(datafeedStats.getAssignmentExplanation(), not(equalTo(AWAITING_UPGRADE.getExplanation()))); } private void startRealtime(String jobId) throws Exception { From 2637e499ac88ce0aa14db62b13be052e7ca78f98 Mon Sep 17 00:00:00 2001 From: Jim 
Ferenczi Date: Thu, 9 May 2019 08:46:34 +0200 Subject: [PATCH 029/321] Fix assertion error when caching the result of a search in a read-only index (#41900) The ReadOnlyEngine wraps its reader with a SoftDeletesDirectoryReaderWrapper if soft deletes are enabled. However the wrapping is done on top of the ElasticsearchDirectoryReader and that trips an assertion later on since the cache keys of these readers are different. This commit changes the order of the wrapping to put the ElasticsearchDirectoryReader first in order to ensure that it is always retrieved first when we unwrap the directory reader. Closes #41795 --- .../elasticsearch/index/engine/ReadOnlyEngine.java | 4 ++-- .../index/engine/ReadOnlyEngineTests.java | 13 ++++++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 5acac256dbd50..b981bdb8a8421 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -157,11 +157,11 @@ public void verifyEngineBeforeIndexClosing() throws IllegalStateException { protected final DirectoryReader wrapReader(DirectoryReader reader, Function readerWrapperFunction) throws IOException { - reader = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId()); if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); } - return readerWrapperFunction.apply(reader); + reader = readerWrapperFunction.apply(reader); + return ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId()); } protected DirectoryReader open(IndexCommit commit) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index f9437ac9251bf..e0ad514e6dbb9 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -18,9 +18,12 @@ */ package org.elasticsearch.index.engine; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.seqno.SeqNoStats; @@ -32,7 +35,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; +import static org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader.getElasticsearchDirectoryReader; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class ReadOnlyEngineTests extends EngineTestCase { @@ -80,6 +85,13 @@ public void testReadOnlyEngine() throws Exception { Engine.Searcher external = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.EXTERNAL); Engine.Searcher internal = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); assertSame(external.reader(), internal.reader()); + assertThat(external.reader(), instanceOf(DirectoryReader.class)); + DirectoryReader dirReader = external.getDirectoryReader(); +
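
The fix above hinges on unwrap order: whichever wrapper is outermost is found first when walking the delegate chain, and its cache key is the one the top-level reader exposes. A minimal sketch of that walk using plain Lucene types (an illustration of the mechanism, not the actual Elasticsearch helper):

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.FilterDirectoryReader;

    final class ReaderUnwrapSketch {
        // Walks the FilterDirectoryReader delegate chain from the outside in and returns
        // the first wrapper of the requested type -- roughly what
        // ElasticsearchDirectoryReader.getElasticsearchDirectoryReader must do. With
        // ElasticsearchDirectoryReader wrapped last (outermost), the top-level reader and
        // the unwrapped reader are the same object, so their cache helpers cannot disagree.
        static <T extends DirectoryReader> T unwrap(DirectoryReader reader, Class<T> type) {
            DirectoryReader current = reader;
            while (current != null) {
                if (type.isInstance(current)) {
                    return type.cast(current);
                }
                current = current instanceof FilterDirectoryReader
                        ? ((FilterDirectoryReader) current).getDelegate()
                        : null;
            }
            return null;
        }
    }
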
ElasticsearchDirectoryReader esReader = getElasticsearchDirectoryReader(dirReader); + IndexReader.CacheHelper helper = esReader.getReaderCacheHelper(); + assertNotNull(helper); + assertEquals(helper.getKey(), dirReader.getReaderCacheHelper().getKey()); + IOUtils.close(external, internal); // the locked down engine should still point to the previous commit assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); @@ -88,7 +100,6 @@ public void testReadOnlyEngine() throws Exception { try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { assertTrue(getResult.exists()); } - } // Close and reopen the main engine try (InternalEngine recoveringEngine = new InternalEngine(config)) { From ed3e25ae7da82ad14fa5b2db434de4fbeb74e7a3 Mon Sep 17 00:00:00 2001 From: Flavio Pompermaier Date: Thu, 9 May 2019 08:52:07 +0200 Subject: [PATCH 030/321] Fix wrong property name (#40636) --- docs/reference/ingest/processors/geoip.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index f38e62806bb9d..4e866624309a7 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -27,7 +27,7 @@ uncompressed. The `ingest-geoip` config directory is located at `$ES_HOME/config | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== -*Depends on what is available in `database_field`: +*Depends on what is available in `database_file`: * If the GeoLite2 City database is used, then the following fields may be added under the `target_field`: `ip`, `country_iso_code`, `country_name`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, `latitude`, `longitude` From 41d59954c1b6dce742843095d56b9c2a66dd371c Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 9 May 2019 10:12:30 +0300 Subject: [PATCH 031/321] Upgrade to Gradle 5.4.1 (#41750) * Upgrade to Gradle 5.4.1 https://docs.gradle.org/5.4/release-notes.html Notable: Support for JDK12 , API for incremental tasks * Use newer version of checkstyle * Increase stack size --- .../org/elasticsearch/gradle/precommit/PrecommitTasks.groovy | 5 ++++- buildSrc/src/main/resources/minimumGradleVersion | 2 +- gradle.properties | 2 +- gradle/wrapper/gradle-wrapper.properties | 4 ++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 0316acef922c4..e14a8f97ba81d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,6 +34,9 @@ import org.gradle.api.plugins.quality.Checkstyle class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. 
*/ + + public static final String CHECKSTYLE_VERSION = '8.20' + public static Task create(Project project, boolean includeDependencyLicenses) { project.configurations.create("forbiddenApisCliJar") project.dependencies { @@ -213,7 +216,7 @@ class PrecommitTasks { configProperties = [ suppressions: checkstyleSuppressions ] - toolVersion = '8.10.1' + toolVersion = CHECKSTYLE_VERSION } project.tasks.withType(Checkstyle) { task -> diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 11aa145248e68..04edabda285a6 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.3 \ No newline at end of file +5.4.1 \ No newline at end of file diff --git a/gradle.properties b/gradle.properties index ec79845d44e78..491770edd7c52 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,3 +1,3 @@ org.gradle.daemon=true -org.gradle.jvmargs=-Xmx2g -XX:+HeapDumpOnOutOfMemoryError +org.gradle.jvmargs=-Xmx2g -XX:+HeapDumpOnOutOfMemoryError -Xss2m options.forkOptions.memoryMaximumSize=2g diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 8d172843af1d6..47216b872e431 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=f4d820c2a9685710eba5b92f10e0e4fb20e0d6c0dd1f46971e658160f25e7147 +distributionSha256Sum=14cd15fc8cc8705bd69dcfa3c8fefb27eb7027f5de4b47a8b279218f76895a91 From 917def1c768439f4643fc8ba9fe1b67ae5e73233 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 9 May 2019 09:51:06 +0100 Subject: [PATCH 032/321] Splits :client:rest-high-level and :x-pack:plugin:core modules in Eclipse (#41064) --- client/rest-high-level/build.gradle | 22 +++++++++++++-- .../src/main/eclipse-build.gradle | 2 ++ .../src/test/eclipse-build.gradle | 6 +++++ settings.gradle | 11 ++++++++ x-pack/docs/build.gradle | 7 +++-- x-pack/plugin/build.gradle | 4 ++- x-pack/plugin/ccr/build.gradle | 7 +++-- .../downgrade-to-basic-license/build.gradle | 3 +++ .../plugin/ccr/qa/multi-cluster/build.gradle | 3 +++ .../ccr/qa/non-compliant-license/build.gradle | 3 +++ x-pack/plugin/ccr/qa/rest/build.gradle | 3 +++ x-pack/plugin/ccr/qa/security/build.gradle | 3 +++ x-pack/plugin/core/build.gradle | 27 ++++++++++++++++--- .../plugin/core/src/main/eclipse-build.gradle | 2 ++ .../plugin/core/src/test/eclipse-build.gradle | 6 +++++ x-pack/plugin/data-frame/build.gradle | 3 +++ .../qa/multi-node-tests/build.gradle | 3 +++ .../qa/single-node-tests/build.gradle | 3 +++ x-pack/plugin/graph/build.gradle | 3 +++ x-pack/plugin/ilm/build.gradle | 3 +++ x-pack/plugin/ilm/qa/rest/build.gradle | 3 +++ x-pack/plugin/logstash/build.gradle | 3 +++ x-pack/plugin/ml/build.gradle | 3 +++ .../ml/qa/ml-with-security/build.gradle | 3 +++ .../qa/native-multi-node-tests/build.gradle | 3 +++ x-pack/plugin/monitoring/build.gradle | 3 +++ x-pack/plugin/rollup/build.gradle | 3 +++ x-pack/plugin/security/build.gradle | 4 ++- x-pack/plugin/security/cli/build.gradle | 3 +++ .../plugin/security/qa/tls-basic/build.gradle | 4 ++- x-pack/plugin/sql/build.gradle | 5 +++- x-pack/plugin/sql/jdbc/build.gradle | 3 +++ 
x-pack/plugin/watcher/build.gradle | 5 +++- x-pack/qa/evil-tests/build.gradle | 3 +++ x-pack/qa/kerberos-tests/build.gradle | 4 ++- x-pack/qa/oidc-op-tests/build.gradle | 7 +++-- x-pack/qa/openldap-tests/build.gradle | 3 +++ .../reindex-tests-with-security/build.gradle | 3 +++ x-pack/qa/saml-idp-tests/build.gradle | 2 +- .../build.gradle | 3 +++ 40 files changed, 176 insertions(+), 18 deletions(-) create mode 100644 client/rest-high-level/src/main/eclipse-build.gradle create mode 100644 client/rest-high-level/src/test/eclipse-build.gradle create mode 100644 x-pack/plugin/core/src/main/eclipse-build.gradle create mode 100644 x-pack/plugin/core/src/test/eclipse-build.gradle diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index a5035a70bcee2..c9a3fc486f1da 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -63,9 +63,13 @@ dependencies { testCompile "junit:junit:${versions.junit}" //this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs testCompile "org.elasticsearch:rest-api-spec:${version}" - // Needed for serialization tests: + // Needed for serialization tests: // (In order to serialize a server side class to a client side class or the other way around) - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + if (isEclipse == false || project.path == ":client:rest-high-level-tests") { + testCompile("org.elasticsearch.plugin:x-pack-core:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-rest-high-level-client' + } + } restSpec "org.elasticsearch:rest-api-spec:${version}" } @@ -92,6 +96,20 @@ forbiddenApisMain { addSignatureFiles 'http-signatures' signaturesFiles += files('src/main/resources/forbidden/rest-high-level-signatures.txt') } + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":client:rest-high-level") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + File nodeCert = file("./testnode.crt") File nodeTrustStore = file("./testnode.jks") diff --git a/client/rest-high-level/src/main/eclipse-build.gradle b/client/rest-high-level/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..6bc7562f7fded --- /dev/null +++ b/client/rest-high-level/src/main/eclipse-build.gradle @@ -0,0 +1,2 @@ +// this is just shell gradle file for eclipse to have separate projects for core src and tests +apply from: '../../build.gradle' diff --git a/client/rest-high-level/src/test/eclipse-build.gradle b/client/rest-high-level/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..56d355658879c --- /dev/null +++ b/client/rest-high-level/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ +// this is just shell gradle file for eclipse to have separate projects for core src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':client:rest-high-level') +} diff --git a/settings.gradle b/settings.gradle index 4e6c59116e9ab..e0650f618ddf4 100644 --- a/settings.gradle +++ b/settings.gradle @@ -112,6 +112,8 @@ if (isEclipse) { projects << 'libs:grok-tests' projects << 'libs:geo-tests' projects << 'libs:ssl-config-tests' + projects << 'client:rest-high-level-tests' + projects << 'x-pack:plugin:core-tests' } include projects.toArray(new String[0]) @@ -155,6 +157,15 @@ if (isEclipse) { 
project(":libs:ssl-config").buildFileName = 'eclipse-build.gradle' project(":libs:ssl-config-tests").projectDir = new File(rootProject.projectDir, 'libs/ssl-config/src/test') project(":libs:ssl-config-tests").buildFileName = 'eclipse-build.gradle' + project(":client:rest-high-level").projectDir = new File(rootProject.projectDir, 'client/rest-high-level/src/main') + project(":client:rest-high-level").buildFileName = 'eclipse-build.gradle' + project(":client:rest-high-level-tests").projectDir = new File(rootProject.projectDir, 'client/rest-high-level/src/test') + project(":client:rest-high-level-tests").buildFileName = 'eclipse-build.gradle' + project(":x-pack:plugin:core").projectDir = new File(rootProject.projectDir, 'x-pack/plugin/core/src/main') + project(":x-pack:plugin:core").buildFileName = 'eclipse-build.gradle' + project(":x-pack:plugin:core-tests").projectDir = new File(rootProject.projectDir, 'x-pack/plugin/core/src/test') + project(":x-pack:plugin:core-tests").buildFileName = 'eclipse-build.gradle' + } // look for extra plugins for elasticsearch diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 5e56414afed24..d7517d007d7c8 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -21,6 +21,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } @@ -741,11 +744,11 @@ setups['app0102_privileges'] = ''' "name": "read", "actions": [ "data:read/*", - "action:login" ], + "action:login" ], "metadata": { "description": "Read access to myapp" } } - } + } } ''' diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 6ce71982f5b1d..fc5dc839ef05e 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -11,6 +11,9 @@ archivesBaseName = 'x-pack' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } subprojects { @@ -187,4 +190,3 @@ integTestCluster { if (integTestCluster.distribution.startsWith("oss-")) { integTest.enabled = false } - diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index a808a7197cccb..4b3ba9307da28 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -18,7 +18,7 @@ String[] noSecurityManagerITClasses = [ "**/CloseFollowerIndexIT.class" ] task internalClusterTestNoSecurityManager(type: Test) { description = 'Java fantasy integration tests with no security manager' - + include noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' systemProperty 'tests.security.manager', 'false' @@ -30,7 +30,7 @@ task internalClusterTest(type: Test) { description = 'Java fantasy integration tests' dependsOn internalClusterTestNoSecurityManager mustRunAfter test - + include '**/*IT.class' exclude noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' @@ -52,6 +52,9 @@ dependencies { compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), 
configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('monitoring'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index cbf30b54d5fdb..cbba093c5526b 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ccr'), configuration: 'runtime') testCompile project(':x-pack:plugin:ccr:qa') } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 7c9c581c5be19..ba4d7ea2064a1 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ccr'), configuration: 'runtime') testCompile project(':x-pack:plugin:ccr:qa') } diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index 6d294c4075595..2ef358b6d735d 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ccr'), configuration: 'runtime') testCompile project(':x-pack:plugin:ccr:qa:') } diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index b06535a17c096..ba0c05e09791a 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ccr'), configuration: 'runtime') } diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index 0e082f51d71a1..872e99051018c 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ccr'), configuration: 'runtime') testCompile project(':x-pack:plugin:ccr:qa') } diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 
a25c53506fb85..c20449724f8e0 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -48,8 +48,12 @@ dependencies { testCompile project(path: ':modules:parent-join', configuration: 'runtime') testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') testCompile project(path: ':modules:analysis-common', configuration: 'runtime') - testCompile(project(':x-pack:license-tools')) { - transitive = false + testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") + + if (isEclipse == false || project.path == ":x-pack:plugin:core-tests") { + testCompile(project(':x-pack:license-tools')) { + transitive = false + } } } @@ -68,7 +72,11 @@ processResources { if (licenseKey != null) { println "Using provided license key from ${licenseKey}" } else if (snapshot) { - licenseKey = Paths.get(project.projectDir.path, 'snapshot.key') + if (isEclipse) { + licenseKey = Paths.get(project.projectDir.path, '../../snapshot.key') + } else { + licenseKey = Paths.get(project.projectDir.path, 'snapshot.key') + } } else { throw new IllegalArgumentException('Property license.key must be set for release build') } @@ -87,6 +95,19 @@ forbiddenPatterns { exclude '**/*.zip' } +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:core") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" diff --git a/x-pack/plugin/core/src/main/eclipse-build.gradle b/x-pack/plugin/core/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..6bc7562f7fded --- /dev/null +++ b/x-pack/plugin/core/src/main/eclipse-build.gradle @@ -0,0 +1,2 @@ +// this is just shell gradle file for eclipse to have separate projects for core src and tests +apply from: '../../build.gradle' diff --git a/x-pack/plugin/core/src/test/eclipse-build.gradle b/x-pack/plugin/core/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..ddec3a7fbd7d3 --- /dev/null +++ b/x-pack/plugin/core/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ +// this is just shell gradle file for eclipse to have separate projects for core src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':x-pack:plugin:core') +} diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/data-frame/build.gradle index e065f72e99880..1e939b2ceb949 100644 --- a/x-pack/plugin/data-frame/build.gradle +++ b/x-pack/plugin/data-frame/build.gradle @@ -13,6 +13,9 @@ dependencies { compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } // xpack modules are installed in real clusters as the meta plugin, so diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle b/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle index ab170d6be364f..0517f71b2dd3e 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.rest-test' dependencies { testCompile project(path: xpackModule('core'), 
configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('data-frame'), configuration: 'runtime') } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle b/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle index 11014c764e330..3fbd779f36239 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.rest-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('data-frame'), configuration: 'runtime') } diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index e7b0b44fd659b..286f8ac0a8917 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -13,6 +13,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } // add all sub-projects of the qa sub-project diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle index e6962e3c3bf72..2b80ddc04207b 100644 --- a/x-pack/plugin/ilm/build.gradle +++ b/x-pack/plugin/ilm/build.gradle @@ -16,6 +16,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } // add all sub-projects of the qa sub-project diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle index c69a3dfce2143..75ba234e55569 100644 --- a/x-pack/plugin/ilm/qa/rest/build.gradle +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -4,6 +4,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ilm'), configuration: 'runtime') } diff --git a/x-pack/plugin/logstash/build.gradle b/x-pack/plugin/logstash/build.gradle index 476d3f17cad41..b799dbd4ceb95 100644 --- a/x-pack/plugin/logstash/build.gradle +++ b/x-pack/plugin/logstash/build.gradle @@ -13,6 +13,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } integTest.enabled = false diff --git 
a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 9bd4d445892e6..c09ced75bdf06 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -53,6 +53,9 @@ dependencies { compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } // This should not be here testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index bc0b0ca5b7b03..93dc8c4fec131 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -5,6 +5,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index 22fd7837628b9..e6fd8412c948b 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -5,6 +5,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('ml'), configuration: 'runtime') testCompile project(path: xpackModule('ml'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index b2e0c930e0d61..cb6395b18a4c2 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -13,6 +13,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } // monitoring deps compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index d159f3334b998..ae53f78dad2ff 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -19,6 +19,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) 
{ + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } integTest.enabled = false diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 27d0db1a48a86..cc5833d20e440 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -22,6 +22,9 @@ dependencies { testCompile project(path: xpackModule('sql:sql-action')) testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } compile 'com.unboundid:unboundid-ldapsdk:4.0.8' compileOnly 'org.bouncycastle:bcprov-jdk15on:1.59' @@ -322,4 +325,3 @@ gradle.projectsEvaluated { .findAll { it.path.startsWith(project.path + ":qa") } .each { check.dependsOn it.check } } - diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 00321c77808cd..29f278b95defa 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -13,6 +13,9 @@ dependencies { testImplementation 'com.google.jimfs:jimfs:1.1' testCompile "org.elasticsearch.test:framework:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } dependencyLicenses { diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle index 9f5ef26f6e6a6..6487475f7c8c3 100644 --- a/x-pack/plugin/security/qa/tls-basic/build.gradle +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -8,6 +8,9 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } forbiddenPatterns { @@ -45,4 +48,3 @@ integTestCluster { return http.wait(5000) } } - diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index c4719aef04ab8..14d80ab50ee3f 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -12,7 +12,7 @@ ext { // SQL dependency versions jlineVersion="3.10.0" antlrVersion="4.5.3" - + // SQL test dependency versions csvjdbcVersion="1.0.34" h2Version="1.4.197" @@ -48,6 +48,9 @@ dependencies { compile "org.antlr:antlr4-runtime:4.5.3" testCompile "org.elasticsearch.test:framework:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: ':modules:reindex', configuration: 'runtime') testCompile project(path: ':modules:parent-join', configuration: 'runtime') diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 3c7eb6b804b5a..9a15bcf29c0a1 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -25,6 +25,9 @@ dependencies { runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + 
testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } dependencyLicenses { diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index a44665ca25c0f..1585488bdb5b3 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -31,6 +31,9 @@ dependencies { compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile "org.elasticsearch.plugin:x-pack-ilm:${version}" // watcher deps @@ -47,7 +50,7 @@ dependencies { } // classes are missing, e.g. com.ibm.icu.lang.UCharacter -thirdPartyAudit { +thirdPartyAudit { ignoreViolations ( // uses internal java api: sun.misc.Unsafe 'com.google.common.cache.Striped64', diff --git a/x-pack/qa/evil-tests/build.gradle b/x-pack/qa/evil-tests/build.gradle index d411909fb310b..bbb1c5804086f 100644 --- a/x-pack/qa/evil-tests/build.gradle +++ b/x-pack/qa/evil-tests/build.gradle @@ -2,6 +2,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index 88248f89b72c5..e4c261b4c5d57 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -14,6 +14,9 @@ integTest.enabled = false dependencies { testCompile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } @@ -74,4 +77,3 @@ task copyKeytabToGeneratedResources(type: Copy) { dependsOn project(':test:fixtures:krb5kdc-fixture').postProcessFixture } project.sourceSets.test.output.dir(generatedResources, builtBy:copyKeytabToGeneratedResources) - diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle index 52e581f60a580..34325bc69b624 100644 --- a/x-pack/qa/oidc-op-tests/build.gradle +++ b/x-pack/qa/oidc-op-tests/build.gradle @@ -8,6 +8,9 @@ dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } testFixtures.useFixture ":x-pack:test:idp-fixture" @@ -61,7 +64,7 @@ integTestCluster { setting 'xpack.security.authc.realms.oidc.c2id-implicit.claims.mail', 'email' setting 'xpack.security.authc.realms.oidc.c2id-implicit.claims.groups', 'groups' setting 'xpack.ml.enabled', 'false' - + extraConfigFile 'op-jwks.json', idpFixtureProject.file("oidc/op-jwks.json") setupCommand 'setupTestAdmin', @@ -79,4 +82,4 @@ integTestCluster { } } -thirdPartyAudit.enabled = false \ No newline at end of file 
+thirdPartyAudit.enabled = false diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 5305699b9a0c7..f732d8fc5b030 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -6,6 +6,9 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } testFixtures.useFixture ":x-pack:test:idp-fixture" diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 7cbdfae5ed48c..5a201832e7c39 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -8,6 +8,9 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } testCompile project(path: ':modules:reindex') } diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 4355ac0b5b825..25d9d9037b719 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -103,7 +103,7 @@ thirdPartyAudit { 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' ) - ignoreMissingClasses ( + ignoreMissingClasses ( 'com.ibm.icu.lang.UCharacter' ) } diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle index a99fa2d543861..604fffbe32f76 100644 --- a/x-pack/qa/security-setup-password-tests/build.gradle +++ b/x-pack/qa/security-setup-password-tests/build.gradle @@ -6,6 +6,9 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } } integTestRunner { From 932c2b63b2129e4e5cae70e1d1a2cc17cd4344f3 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 9 May 2019 11:47:36 +0200 Subject: [PATCH 033/321] Clean up ShardId usage of Streamable (#41843) ShardId already implements Writeable so there is no need for it to implement Streamable too. Also the readShardId static method can be easily replaced with direct usages of the constructor that takes a StreamInput as argument. 
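To make the change concrete, here is a minimal before/after sketch of the deserialization pattern rewritten at each call site below (illustrative only; "in" stands for the StreamInput handed to a readFrom method or a deserializing constructor, and the surrounding class bodies are elided):

    // before: every reader went through the static Streamable-style helper
    shardId = ShardId.readShardId(in);

    // after: direct use of the ShardId(StreamInput) constructor, which performs the same reads
    shardId = new ShardId(in);

ShardId keeps its writeTo(StreamOutput) implementation from Writeable; only the Streamable surface (the throwing readFrom override and the readShardId helper) goes away.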
--- .../action/DocWriteResponse.java | 2 +- .../shards/ClusterSearchShardsGroup.java | 2 +- .../status/TransportNodesSnapshotsStatus.java | 2 +- .../admin/indices/stats/IndexShardStats.java | 2 +- .../upgrade/post/ShardUpgradeResult.java | 4 +- .../action/bulk/BulkShardResponse.java | 2 +- .../broadcast/BroadcastShardRequest.java | 2 +- .../broadcast/BroadcastShardResponse.java | 4 +- .../replication/ReplicationRequest.java | 2 +- .../replication/ReplicationResponse.java | 2 +- .../InstanceShardOperationRequest.java | 4 +- .../single/shard/SingleShardRequest.java | 5 +-- .../cluster/RestoreInProgress.java | 2 +- .../cluster/SnapshotsInProgress.java | 2 +- .../action/shard/ShardStateAction.java | 4 +- .../cluster/routing/ShardRouting.java | 2 +- ...ransportNodesListGatewayStartedShards.java | 4 +- .../index/seqno/RetentionLeaseActions.java | 2 +- .../elasticsearch/index/shard/ShardId.java | 38 +++++++------------ .../flush/ShardsSyncedFlushResult.java | 2 +- .../indices/flush/SyncedFlushService.java | 6 +-- .../recovery/RecoveryCleanFilesRequest.java | 2 +- .../recovery/RecoveryFileChunkRequest.java | 2 +- .../recovery/RecoveryFilesInfoRequest.java | 2 +- .../RecoveryFinalizeRecoveryRequest.java | 2 +- .../RecoveryHandoffPrimaryContextRequest.java | 2 +- ...ryPrepareForTranslogOperationsRequest.java | 2 +- .../indices/recovery/RecoveryState.java | 2 +- .../RecoveryTranslogOperationsRequest.java | 2 +- .../RecoveryWaitForClusterStateRequest.java | 2 +- .../recovery/StartRecoveryRequest.java | 2 +- .../indices/store/IndicesStore.java | 2 +- .../TransportNodesListShardStoreMetaData.java | 6 +-- .../search/SearchShardTarget.java | 2 +- .../internal/ShardSearchLocalRequest.java | 2 +- .../snapshots/SnapshotShardFailure.java | 2 +- .../snapshots/SnapshotShardsService.java | 2 +- .../xpack/ccr/action/ShardChangesAction.java | 2 +- .../xpack/ccr/action/ShardFollowTask.java | 4 +- .../PutCcrRestoreSessionRequest.java | 2 +- 40 files changed, 63 insertions(+), 76 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 465a93914410b..80225c3a60ce4 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -259,7 +259,7 @@ public String getLocation(@Nullable String routing) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); type = in.readString(); id = in.readString(); version = in.readZLong(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 1c6bf90c4149d..d8948958d7a07 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -40,7 +40,7 @@ public ClusterSearchShardsGroup(ShardId shardId, ShardRouting[] shards) { } ClusterSearchShardsGroup(StreamInput in) throws IOException { - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); shards = new ShardRouting[in.readVInt()]; for (int i = 0; i < shards.length; i++) { shards[i] = new ShardRouting(shardId, in); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 1737756fe7db9..8f71090cc469f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -214,7 +214,7 @@ public void readFrom(StreamInput in) throws IOException { int numberOfShards = in.readVInt(); Map shardMapBuilder = new HashMap<>(numberOfShards); for (int j = 0; j < numberOfShards; j++) { - ShardId shardId = ShardId.readShardId(in); + ShardId shardId = new ShardId(in); SnapshotIndexShardStatus status = SnapshotIndexShardStatus.readShardSnapshotStatus(in); shardMapBuilder.put(shardId, status); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java index 6cdcabccbc481..6c1de5b2992c4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java @@ -90,7 +90,7 @@ public CommonStats getPrimary() { @Override public void readFrom(StreamInput in) throws IOException { - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); int shardSize = in.readVInt(); shards = new ShardStats[shardSize]; for (int i = 0; i < shardSize; i++) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java index cca5a812c3e96..7b57c9680bd16 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResult.java @@ -68,7 +68,7 @@ public boolean primary() { @Override public void readFrom(StreamInput in) throws IOException { - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); primary = in.readBoolean(); upgradeVersion = Version.readVersion(in); try { @@ -86,4 +86,4 @@ public void writeTo(StreamOutput out) throws IOException { Version.writeVersion(upgradeVersion, out); out.writeString(oldestLuceneSegment.toString()); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index aa368c13fb80e..fc58e620738da 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -67,7 +67,7 @@ public void setForcedRefresh(boolean forcedRefresh) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); responses = new BulkItemResponse[in.readVInt()]; for (int i = 0; i < responses.length; i++) { responses[i] = BulkItemResponse.readBulkItem(in); diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java index 3869ab891b837..f61ff4ac748b0 100644 --- 
a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardRequest.java @@ -60,7 +60,7 @@ public IndicesOptions indicesOptions() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); originalIndices = OriginalIndices.readOriginalIndices(in); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java index 6845e6ced6cee..51e59a8c18982 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java @@ -49,7 +49,7 @@ public ShardId getShardId() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override @@ -57,4 +57,4 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 31d18d4dc0537..857103071e022 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -69,7 +69,7 @@ public abstract class ReplicationRequest builder = ImmutableOpenMap.builder(); int shards = in.readVInt(); for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); + ShardId shardId = new ShardId(in); ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); builder.put(shardId, shardState); } diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index b5827dd01a1d1..5190adf7ba2d9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -432,7 +432,7 @@ public SnapshotsInProgress(StreamInput in) throws IOException { ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); int shards = in.readVInt(); for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); + ShardId shardId = new ShardId(in); builder.put(shardId, new ShardSnapshotStatus(in)); } long repositoryStateId = in.readLong(); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c86da4b789bf0..1f9a45c4713f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -394,7 +394,7 @@ public static class FailedShardEntry extends TransportRequest { FailedShardEntry(StreamInput in) throws IOException { super(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); allocationId = in.readString(); primaryTerm = in.readVLong(); message = in.readString(); @@ -592,7 +592,7 @@ public static class 
StartedShardEntry extends TransportRequest { StartedShardEntry(StreamInput in) throws IOException { super(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); allocationId = in.readString(); primaryTerm = in.readVLong(); this.message = in.readString(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index bfc4ce0618833..bbe82c42cf71c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -279,7 +279,7 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { } public ShardRouting(StreamInput in) throws IOException { - this(ShardId.readShardId(in), in); + this(new ShardId(in), in); } /** diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index e2eb75458c0cc..c9e7100ebd66e 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -185,7 +185,7 @@ public ShardId shardId() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override @@ -229,7 +229,7 @@ public NodeRequest(String nodeId, Request request) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index c503f1fa16377..dfa985cc5a684 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -302,7 +302,7 @@ public ActionRequestValidationException validate() { @Override public void readFrom(final StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); id = in.readString(); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java index 77e1ae00e964f..e3becbef7dd1e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -33,20 +32,12 @@ /** * Allows for shard level components to be injected with the shard id. 
*/ -public class ShardId implements Streamable, Comparable, ToXContentFragment, Writeable { +public class ShardId implements Comparable, ToXContentFragment, Writeable { private final Index index; - private final int shardId; - private final int hashCode; - public ShardId(StreamInput in) throws IOException { - index = new Index(in); - shardId = in.readVInt(); - hashCode = computeHashCode(); - } - public ShardId(Index index, int shardId) { this.index = index; this.shardId = shardId; @@ -57,6 +48,18 @@ public ShardId(String index, String indexUUID, int shardId) { this(new Index(index, indexUUID), shardId); } + public ShardId(StreamInput in) throws IOException { + index = new Index(in); + shardId = in.readVInt(); + hashCode = computeHashCode(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + index.writeTo(out); + out.writeVInt(shardId); + } + public Index getIndex() { return index; } @@ -113,21 +116,6 @@ private int computeHashCode() { return result; } - public static ShardId readShardId(StreamInput in) throws IOException { - return new ShardId(in); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - index.writeTo(out); - out.writeVInt(shardId); - } - @Override public int compareTo(ShardId o) { if (o.getId() == shardId) { diff --git a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java index bd0451498c6ba..b44feab6e603f 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java @@ -148,7 +148,7 @@ public void readFrom(StreamInput in) throws IOException { shardResponses.put(shardRouting, response); } syncId = in.readOptionalString(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); totalShards = in.readInt(); } diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 79a2d6c3c0a91..921a8f9cc7c47 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -567,7 +567,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - this.shardId = ShardId.readShardId(in); + this.shardId = new ShardId(in); } public ShardId shardId() { @@ -647,7 +647,7 @@ public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId e @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); expectedCommitId = new Engine.CommitId(in); syncId = in.readString(); } @@ -749,7 +749,7 @@ public InFlightOpsRequest(ShardId shardId) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java index 
8d847dcef91c7..2f166e59ac836 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -49,7 +49,7 @@ public class RecoveryCleanFilesRequest extends TransportRequest { RecoveryCleanFilesRequest(StreamInput in) throws IOException { super(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); snapshotFiles = new Store.MetadataSnapshot(in); totalTranslogOps = in.readVInt(); if (in.getVersion().onOrAfter(Version.V_7_2_0)) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java index 196e2cdf76ba9..59480ccbe4233 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java @@ -96,7 +96,7 @@ public long sourceThrottleTimeInNanos() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); String name = in.readString(); position = in.readVLong(); long length = in.readVLong(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java index 2143127c234f5..b1fdef06ed518 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java @@ -66,7 +66,7 @@ public ShardId shardId() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); int size = in.readVInt(); phase1FileNames = new ArrayList<>(size); for (int i = 0; i < size; i++) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index c21a112320c40..82036338be7ca 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -57,7 +57,7 @@ public long globalCheckpoint() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); globalCheckpoint = in.readZLong(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java index a4a87cf2d60ba..bccb917646fe7 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java @@ -72,7 +72,7 @@ ReplicationTracker.PrimaryContext primaryContext() { public void readFrom(final StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); primaryContext = new ReplicationTracker.PrimaryContext(in); } diff --git 
a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index 1d2be7bafda8a..6e2557176a82e 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -43,7 +43,7 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { RecoveryPrepareForTranslogOperationsRequest(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); totalTranslogOps = in.readVInt(); fileBasedRecovery = in.readBoolean(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 7393eccc44e86..18018fc7db054 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -134,7 +134,7 @@ public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nulla public RecoveryState(StreamInput in) throws IOException { timer = new Timer(in); stage = Stage.fromId(in.readByte()); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); recoverySource = RecoverySource.readFrom(in); targetNode = new DiscoveryNode(in); sourceNode = in.readOptionalWriteable(DiscoveryNode::new); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java index 9608455216746..ca3d85de419b9 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -100,7 +100,7 @@ long mappingVersionOnPrimary() { RecoveryTranslogOperationsRequest(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); operations = Translog.readOperations(in, "recovery"); totalTranslogOps = in.readVInt(); maxSeenAutoIdTimestampOnPrimary = in.readZLong(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java index e45cf3f7d1611..d8ac7e59d73d1 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryWaitForClusterStateRequest.java @@ -56,7 +56,7 @@ public long clusterStateVersion() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); clusterStateVersion = in.readVLong(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 6c4e7b744b729..4ec20d17ac5be 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ 
b/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -114,7 +114,7 @@ public long startingSeqNo() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); targetAllocationId = in.readString(); sourceNode = new DiscoveryNode(in); targetNode = new DiscoveryNode(in); diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 7a07d8f62b229..bfb271c7b089e 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -412,7 +412,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); clusterName = new ClusterName(in); indexUUID = in.readString(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); timeout = new TimeValue(in.readLong(), TimeUnit.MILLISECONDS); } diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 5ebbdab39835b..bc041b4b322ae 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -203,7 +203,7 @@ public static StoreFilesMetaData readStoreFilesMetaData(StreamInput in) throws I @Override public void readFrom(StreamInput in) throws IOException { - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); this.metadataSnapshot = new Store.MetadataSnapshot(in); } @@ -245,7 +245,7 @@ public Request(ShardId shardId, DiscoveryNode[] nodes) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override @@ -288,7 +288,7 @@ public NodeRequest() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 6aadf8425997d..4bc3e3ae986da 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -49,7 +49,7 @@ public SearchShardTarget(StreamInput in) throws IOException { } else { nodeId = null; } - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); this.originalIndices = null; clusterAlias = in.readOptionalString(); } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index f39317712fe0c..12eef5dcf29de 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -107,7 +107,7 @@ private ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType } ShardSearchLocalRequest(StreamInput in) throws IOException { - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); searchType = SearchType.fromId(in.readByte()); numberOfShards = 
in.readVInt(); scroll = in.readOptionalWriteable(Scroll::new); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java index 28202fa2c0b97..d95fdb0a55692 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardFailure.java @@ -99,7 +99,7 @@ static SnapshotShardFailure readSnapshotShardFailure(StreamInput in) throws IOEx @Override public void readFrom(StreamInput in) throws IOException { nodeId = in.readOptionalString(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); super.shardId = shardId.getId(); index = shardId.getIndexName(); reason = in.readString(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 9b6ab76a98745..f052a1c7abeb8 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -448,7 +448,7 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); snapshot = new Snapshot(in); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); status = new ShardSnapshotStatus(in); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 20b13474afa82..0ef14115eab32 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -146,7 +146,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); fromSeqNo = in.readVLong(); maxOperationCount = in.readVInt(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); expectedHistoryUUID = in.readString(); pollTimeout = in.readTimeValue(); maxBatchSize = new ByteSizeValue(in); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index b00bf15782fa3..5d4564a2030c1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -92,8 +92,8 @@ public class ShardFollowTask extends ImmutableFollowParameters implements XPackP public static ShardFollowTask readFrom(StreamInput in) throws IOException { String remoteCluster = in.readString(); - ShardId followShardId = ShardId.readShardId(in); - ShardId leaderShardId = ShardId.readShardId(in); + ShardId followShardId = new ShardId(in); + ShardId leaderShardId = new ShardId(in); return new ShardFollowTask(remoteCluster, followShardId, leaderShardId, in); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionRequest.java index da0c43116ee76..e348df79223a7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionRequest.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionRequest.java @@ -37,7 +37,7 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); sessionUUID = in.readString(); - shardId = ShardId.readShardId(in); + shardId = new ShardId(in); } @Override From f2a558d7ec9b65f24070e5926e1433a66b41ece4 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 9 May 2019 11:48:05 +0200 Subject: [PATCH 034/321] Cut over SearchResponse and SearchTemplateResponse to Writeable (#41855) Relates to #34389 --- .../noop/action/search/NoopSearchAction.java | 10 ++- .../mustache/MultiSearchTemplateResponse.java | 3 +- .../script/mustache/SearchTemplateAction.java | 8 ++- .../mustache/SearchTemplateResponse.java | 12 ++-- .../action/search/MultiSearchResponse.java | 3 +- .../action/search/SearchAction.java | 8 ++- .../action/search/SearchResponse.java | 68 ++++++++----------- .../action/search/SearchScrollAction.java | 8 ++- .../search/MultiSearchActionTookTests.java | 4 +- .../action/search/SearchAsyncActionTests.java | 13 ++-- .../action/search/SearchResponseTests.java | 2 +- .../TransportMultiSearchActionTests.java | 4 +- .../rollup/action/RollupSearchAction.java | 12 ++-- .../xpack/core/ClientHelperTests.java | 11 ++- 14 files changed, 99 insertions(+), 67 deletions(-) diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java index e7e515594a55d..9b390e1ffddbc 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java @@ -20,17 +20,23 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.io.stream.Writeable; public class NoopSearchAction extends Action { public static final NoopSearchAction INSTANCE = new NoopSearchAction(); public static final String NAME = "mock:data/read/search"; - public NoopSearchAction() { + private NoopSearchAction() { super(NAME); } @Override public SearchResponse newResponse() { - return new SearchResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return SearchResponse::new; } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index dd8cdc04457ad..68adcffc4c4d0 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -49,8 +49,7 @@ public static class Item implements Writeable { private Item(StreamInput in) throws IOException { if (in.readBoolean()) { - this.response = new SearchTemplateResponse(); - response.readFrom(in); + this.response = new SearchTemplateResponse(in); this.exception = null; } else { exception = in.readException(); diff --git 
a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java index a08329f48dcbb..5d905ec39e1ab 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class SearchTemplateAction extends Action { @@ -32,6 +33,11 @@ private SearchTemplateAction() { @Override public SearchTemplateResponse newResponse() { - return new SearchTemplateResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return SearchTemplateResponse::new; } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index 6d19afbfd6fe6..52a3a8b28826c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -48,6 +48,12 @@ public class SearchTemplateResponse extends ActionResponse implements StatusToXC SearchTemplateResponse() { } + SearchTemplateResponse(StreamInput in) throws IOException { + super(in); + source = in.readOptionalBytesReference(); + response = in.readOptionalWriteable(SearchResponse::new); + } + public BytesReference getSource() { return source; } @@ -81,10 +87,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - source = in.readOptionalBytesReference(); - response = in.readOptionalStreamable(SearchResponse::new); + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index a924105bff6a2..10163502391cd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -70,8 +70,7 @@ public Item(SearchResponse response, Exception exception) { Item(StreamInput in) throws IOException{ if (in.readBoolean()) { - this.response = new SearchResponse(); - this.response.readFrom(in); + this.response = new SearchResponse(in); this.exception = null; } else { this.exception = in.readException(); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchAction.java index e028f6c5cd524..d665595e8d34d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; 
public class SearchAction extends Action { @@ -32,6 +33,11 @@ private SearchAction() { @Override public SearchResponse newResponse() { - return new SearchResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return SearchResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index b9b1887be2da8..b93b99ae15998 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -66,23 +66,33 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); private static final ParseField NUM_REDUCE_PHASES = new ParseField("num_reduce_phases"); - private SearchResponseSections internalResponse; - - private String scrollId; - - private int totalShards; - - private int successfulShards; - - private int skippedShards; - - private ShardSearchFailure[] shardFailures; - - private Clusters clusters; - - private long tookInMillis; - - public SearchResponse() { + private final SearchResponseSections internalResponse; + private final String scrollId; + private final int totalShards; + private final int successfulShards; + private final int skippedShards; + private final ShardSearchFailure[] shardFailures; + private final Clusters clusters; + private final long tookInMillis; + + public SearchResponse(StreamInput in) throws IOException { + super(in); + internalResponse = new InternalSearchResponse(in); + totalShards = in.readVInt(); + successfulShards = in.readVInt(); + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = readShardSearchFailure(in); + } + } + clusters = new Clusters(in); + scrollId = in.readOptionalString(); + tookInMillis = in.readVLong(); + skippedShards = in.readVInt(); } public SearchResponse(SearchResponseSections internalResponse, String scrollId, int totalShards, int successfulShards, @@ -193,10 +203,6 @@ public String getScrollId() { return scrollId; } - public void scrollId(String scrollId) { - this.scrollId = scrollId; - } - /** * If profiling was enabled, this returns an object containing the profile results from * each shard. 
If profiling was not enabled, this will return null @@ -355,24 +361,8 @@ static SearchResponse innerFromXContent(XContentParser parser) throws IOExceptio } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - internalResponse = new InternalSearchResponse(in); - totalShards = in.readVInt(); - successfulShards = in.readVInt(); - int size = in.readVInt(); - if (size == 0) { - shardFailures = ShardSearchFailure.EMPTY_ARRAY; - } else { - shardFailures = new ShardSearchFailure[size]; - for (int i = 0; i < shardFailures.length; i++) { - shardFailures[i] = readShardSearchFailure(in); - } - } - clusters = new Clusters(in); - scrollId = in.readOptionalString(); - tookInMillis = in.readVLong(); - skippedShards = in.readVInt(); + public void readFrom(StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java index ff72a7e5e51f3..0b4adfc1ba55c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class SearchScrollAction extends Action { @@ -32,6 +33,11 @@ private SearchScrollAction() { @Override public SearchResponse newResponse() { - return new SearchResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return SearchResponse::new; } } diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index ece695575a107..01f1109ef3bed 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; @@ -154,7 +155,8 @@ public void search(final SearchRequest request, final ActionListener { counter.decrementAndGet(); - listener.onResponse(new SearchResponse()); + listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY)); }); } }; diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 23abefea15fc7..6a6b1c54db225 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.InternalSearchResponse; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; @@ -406,13 +407,17 @@ static GroupShardsIterator getShardsIter(String index, Orig } public static class TestSearchResponse extends SearchResponse { - public final Set queried = new HashSet<>(); + final Set queried = new HashSet<>(); + + TestSearchResponse() { + super(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, Clusters.EMPTY); + } } public static class TestSearchPhaseResult extends SearchPhaseResult { final DiscoveryNode node; - public TestSearchPhaseResult(long id, DiscoveryNode node) { + TestSearchPhaseResult(long id, DiscoveryNode node) { this.requestId = id; this.node = node; } @@ -427,7 +432,7 @@ public static final class MockConnection implements Transport.Connection { private final DiscoveryNode node; - public MockConnection(DiscoveryNode node) { + MockConnection(DiscoveryNode node) { this.node = node; } @@ -438,7 +443,7 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { + throws TransportException { throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 18890e1339557..ed5104cabe5d2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -278,7 +278,7 @@ public void testToXContent() { public void testSerialization() throws IOException { SearchResponse searchResponse = createTestItem(false); - SearchResponse deserialized = copyStreamable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); if (searchResponse.getHits().getTotalHits() == null) { assertNull(deserialized.getHits().getTotalHits()); } else { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index dade5eadb1832..7ecc172924b1c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -117,7 +118,8 @@ public void search(final SearchRequest request, final ActionListener { counter.decrementAndGet(); - listener.onResponse(new SearchResponse()); + listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY)); }); } }; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java index 
595df0f8c31ad..c6eecca5e3d80 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.Writeable; public class RollupSearchAction extends Action { @@ -22,16 +23,17 @@ private RollupSearchAction() { @Override public SearchResponse newResponse() { - return new SearchResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return SearchResponse::new; } public static class RequestBuilder extends ActionRequestBuilder { public RequestBuilder(ElasticsearchClient client, SearchRequest searchRequest) { super(client, INSTANCE, searchRequest); } - - RequestBuilder(ElasticsearchClient client) { - super(client, INSTANCE, new SearchRequest()); - } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index 1a0a8e764124d..9641f09711995 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -12,11 +12,13 @@ import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationField; @@ -232,7 +234,8 @@ public void testExecuteWithHeadersNoHeaders() { when(client.threadPool()).thenReturn(threadPool); PlainActionFuture searchFuture = PlainActionFuture.newFuture(); - searchFuture.onResponse(new SearchResponse()); + searchFuture.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY)); when(client.search(any())).thenReturn(searchFuture); assertExecutionWithOrigin(Collections.emptyMap(), client); } @@ -245,7 +248,8 @@ public void testExecuteWithHeaders() { when(client.threadPool()).thenReturn(threadPool); PlainActionFuture searchFuture = PlainActionFuture.newFuture(); - searchFuture.onResponse(new SearchResponse()); + searchFuture.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY)); when(client.search(any())).thenReturn(searchFuture); Map headers = MapBuilder. 
<String, String>newMapBuilder().put(AuthenticationField.AUTHENTICATION_KEY, "anything") .put(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything").map(); @@ -265,7 +269,8 @@ public void testExecuteWithHeadersNoSecurityHeaders() { when(client.threadPool()).thenReturn(threadPool); PlainActionFuture<SearchResponse> searchFuture = PlainActionFuture.newFuture(); - searchFuture.onResponse(new SearchResponse()); + searchFuture.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY)); when(client.search(any())).thenReturn(searchFuture); Map<String, String> unrelatedHeaders = MapBuilder.<String, String>newMapBuilder().put(randomAlphaOfLength(10), "anything").map(); From 2592b4937696b948334ccd328deed0860ec4ea09 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 9 May 2019 06:30:46 -0400 Subject: [PATCH 035/321] Remove manual parsing of JVM options (#41962) This commit removes manual parsing of JVM options when calculating ergonomics. This avoids a situation in which we parse values differently than the JVM would. In fact, we already have a bug along these lines today. It is possible to start the JVM with the same flag multiple times on the command line. In this case, the last value wins. For example, -Xmx1g -Xmx2g would start the JVM with a heap size of two gigabytes. Our JVM ergonomics ignores this possibility and instead the first value wins! Our strategy to avoid manual parsing of the JVM options is to start the Java command line parser (without actually starting a JVM) by invoking java with the same command line flags as presented and request that the JVM tell us what values it would start with. This ensures that we have the correct values when making ergonomic decisions. Moreover, our previous strategy also ignored ES_JAVA_OPTS, which could override the heap size as well, leading to incorrect ergonomic choices. This commit addresses this issue too. --- distribution/src/bin/elasticsearch | 2 +- .../src/bin/elasticsearch-service.bat | 2 +- distribution/src/bin/elasticsearch.bat | 2 +- .../tools/launchers/JvmErgonomics.java | 111 +++++++++++------- .../tools/launchers/JvmOptionsParser.java | 21 +++- .../tools/launchers/JvmErgonomicsTests.java | 67 ++++++++--- 6 files changed, 138 insertions(+), 67 deletions(-) diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 8bdea4950cb75..6843607efa19b 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -18,7 +18,7 @@ source "`dirname "$0"`"/elasticsearch-env ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"` -ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR} $ES_JAVA_OPTS" +ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR}" # manual parsing to find out, if process should be detached if ! echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; then diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index 7a0be55c4f565..2f9c280743dfb 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -112,7 +112,7 @@ if not "%ES_JAVA_OPTS%" == "" set ES_JAVA_OPTS=%ES_JAVA_OPTS: =;% @setlocal for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" 
|| echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a -@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% %ES_JAVA_OPTS% +@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( exit /b 1 diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index ecbbad826e797..f14185ddc4a27 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -44,7 +44,7 @@ IF ERRORLEVEL 1 ( set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options @setlocal for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a -@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% %ES_JAVA_OPTS% +@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( exit /b 1 diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index 761cd9e1be5db..12757c970496a 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -19,24 +19,27 @@ package org.elasticsearch.tools.launchers; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Tunes Elasticsearch JVM settings based on inspection of provided JVM options. */ final class JvmErgonomics { - private static final long KB = 1024L; - - private static final long MB = 1024L * 1024L; - - private static final long GB = 1024L * 1024L * 1024L; - private JvmErgonomics() { throw new AssertionError("No instances intended"); @@ -48,48 +51,73 @@ private JvmErgonomics() { * @param userDefinedJvmOptions A list of JVM options that have been defined by the user. * @return A list of additional JVM options to set. 
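* For example (an illustrative case, consistent with the ergonomics tests later in this patch): given "-Xmx1g" among the user-defined options and no explicit io.netty.allocator.type system property, the returned list is ["-Dio.netty.allocator.type=unpooled"].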
*/ - static List<String> choose(List<String> userDefinedJvmOptions) { - List<String> ergonomicChoices = new ArrayList<>(); - Long heapSize = extractHeapSize(userDefinedJvmOptions); - Map<String, String> systemProperties = extractSystemProperties(userDefinedJvmOptions); - if (heapSize != null) { - if (systemProperties.containsKey("io.netty.allocator.type") == false) { - if (heapSize <= 1 * GB) { - ergonomicChoices.add("-Dio.netty.allocator.type=unpooled"); - } else { - ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); - } + static List<String> choose(final List<String> userDefinedJvmOptions) throws InterruptedException, IOException { + final List<String> ergonomicChoices = new ArrayList<>(); + final Map<String, Optional<String>> finalJvmOptions = finalJvmOptions(userDefinedJvmOptions); + final long heapSize = extractHeapSize(finalJvmOptions); + final Map<String, String> systemProperties = extractSystemProperties(userDefinedJvmOptions); + if (systemProperties.containsKey("io.netty.allocator.type") == false) { + if (heapSize <= 1 << 30) { + ergonomicChoices.add("-Dio.netty.allocator.type=unpooled"); + } else { + ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); } } return ergonomicChoices; } - private static final Pattern MAX_HEAP_SIZE = Pattern.compile("^(-Xmx|-XX:MaxHeapSize=)(?<size>\\d+)(?<unit>\\w)?$"); + private static final Pattern OPTION = + Pattern.compile("^\\s*\\S+\\s+(?<flag>\\S+)\\s+:?=\\s+(?<value>\\S+)?\\s+\\{[^}]+?\\}\\s+\\{[^}]+}"); - // package private for testing - static Long extractHeapSize(List<String> userDefinedJvmOptions) { - for (String jvmOption : userDefinedJvmOptions) { - final Matcher matcher = MAX_HEAP_SIZE.matcher(jvmOption); - if (matcher.matches()) { - final long size = Long.parseLong(matcher.group("size")); - final String unit = matcher.group("unit"); - if (unit == null) { - return size; - } else { - switch (unit.toLowerCase(Locale.ROOT)) { - case "k": - return size * KB; - case "m": - return size * MB; - case "g": - return size * GB; - default: - throw new IllegalArgumentException("Unknown unit [" + unit + "] for max heap size in [" + jvmOption + "]"); - } - } - } + static Map<String, Optional<String>> finalJvmOptions( + final List<String> userDefinedJvmOptions) throws InterruptedException, IOException { + return flagsFinal(userDefinedJvmOptions).stream() + .map(OPTION::matcher).filter(Matcher::matches) + .collect(Collectors.toUnmodifiableMap(m -> m.group("flag"), m -> Optional.ofNullable(m.group("value")))); + } + + private static List<String> flagsFinal(final List<String> userDefinedJvmOptions) throws InterruptedException, IOException { + /* + * To deduce the final set of JVM options that Elasticsearch is going to start with, we start a separate Java process with the JVM + * options that we would pass on the command line. For this Java process we will add two additional flags, -XX:+PrintFlagsFinal and + * -version. This causes the Java process that we start to parse the JVM options into their final values, display them on standard + * output, print the version to standard error, and then exit. The JVM itself never bootstraps, and therefore this process is + * lightweight. By doing this, we get the JVM options parsed exactly as the JVM that we are going to execute would parse them + * without having to implement our own JVM option parsing logic. 
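+ * + * As a rough illustration (assumed formatting; the exact columns vary by JDK build), a -XX:+PrintFlagsFinal output line that the OPTION pattern above is meant to match looks like "size_t MaxHeapSize = 1073741824 {product} {ergonomic}", where the "flag" group captures MaxHeapSize and the "value" group captures 1073741824.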
+ */ + final String java = Path.of(System.getProperty("java.home"), "bin", "java").toString(); + final List<String> command = + Stream.of(Stream.of(java), userDefinedJvmOptions.stream(), Stream.of("-XX:+PrintFlagsFinal"), Stream.of("-version")) + .reduce(Stream::concat) + .get() + .collect(Collectors.toUnmodifiableList()); + final Process process = new ProcessBuilder().command(command).start(); + final List<String> output = readLinesFromInputStream(process.getInputStream()); + final List<String> error = readLinesFromInputStream(process.getErrorStream()); + final int status = process.waitFor(); + if (status != 0) { + final String message = String.format( + Locale.ROOT, + "starting java failed with [%d]\noutput:\n%s\nerror:\n%s", + status, + String.join("\n", output), + String.join("\n", error)); + throw new RuntimeException(message); + } else { + return output; + } + } + + private static List<String> readLinesFromInputStream(final InputStream is) throws IOException { + try (InputStreamReader isr = new InputStreamReader(is, StandardCharsets.UTF_8); + BufferedReader br = new BufferedReader(isr)) { + return br.lines().collect(Collectors.toUnmodifiableList()); } - return null; + } + + // package private for testing + static Long extractHeapSize(final Map<String, Optional<String>> finalJvmOptions) { + return Long.parseLong(finalJvmOptions.get("MaxHeapSize").get()); } private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?<key>[\\w+].*?)=(?<value>.*)$"); @@ -105,4 +133,5 @@ static Map<String, String> extractSystemProperties(List<String> userDefinedJvmOp } return systemProperties; } + } diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java index d74f106c50ba4..7894cab72a1ee 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java @@ -19,12 +19,14 @@ package org.elasticsearch.tools.launchers; +import org.elasticsearch.tools.java_version_checker.JavaVersion; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; @@ -35,10 +37,10 @@ import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; +import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; - -import org.elasticsearch.tools.java_version_checker.JavaVersion; +import java.util.stream.Collectors; /** * Parses JVM options from a file and prints a single line with all JVM options to standard output. 
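* For example (illustrative only, not captured output): a jvm.options file containing -Xms1g and -Xmx1g would result in a printed line such as "-Xms1g -Xmx1g -Dio.netty.allocator.type=unpooled".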
@@ -51,14 +53,14 @@ final class JvmOptionsParser { * * @param args the args to the program which should consist of a single option, the path to the JVM options */ - public static void main(final String[] args) throws IOException { + public static void main(final String[] args) throws InterruptedException, IOException { if (args.length != 1) { throw new IllegalArgumentException("expected one argument specifying path to jvm.options but was " + Arrays.toString(args)); } final List jvmOptions = new ArrayList<>(); final SortedMap invalidLines = new TreeMap<>(); try (InputStream is = Files.newInputStream(Paths.get(args[0])); - Reader reader = new InputStreamReader(is, Charset.forName("UTF-8")); + Reader reader = new InputStreamReader(is, StandardCharsets.UTF_8); BufferedReader br = new BufferedReader(reader)) { parse( JavaVersion.majorVersion(JavaVersion.CURRENT), @@ -78,7 +80,14 @@ public void accept(final int lineNumber, final String line) { } if (invalidLines.isEmpty()) { - List ergonomicJvmOptions = JvmErgonomics.choose(jvmOptions); + // now append the JVM options from ES_JAVA_OPTS + final String environmentJvmOptions = System.getenv("ES_JAVA_OPTS"); + if (environmentJvmOptions != null) { + jvmOptions.addAll(Arrays.stream(environmentJvmOptions.split("\\s+")) + .filter(Predicate.not(String::isBlank)) + .collect(Collectors.toUnmodifiableList())); + } + final List ergonomicJvmOptions = JvmErgonomics.choose(jvmOptions); jvmOptions.addAll(ergonomicJvmOptions); final String spaceDelimitedJvmOptions = spaceDelimitJvmOptions(jvmOptions); Launchers.outPrintln(spaceDelimitedJvmOptions); diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index 4b075d78b70a8..b5b6699f4716f 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -19,38 +19,70 @@ package org.elasticsearch.tools.launchers; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasToString; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; public class JvmErgonomicsTests extends LaunchersTestCase { - public void testExtractValidHeapSize() { - assertEquals(Long.valueOf(1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx1024"))); - assertEquals(Long.valueOf(2L * 1024 * 1024 * 1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx2g"))); - assertEquals(Long.valueOf(32 * 1024 * 1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx32M"))); - assertEquals(Long.valueOf(32 * 1024 * 1024), JvmErgonomics.extractHeapSize(Collections.singletonList("-XX:MaxHeapSize=32M"))); + + public void testExtractValidHeapSizeUsingXmx() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx2g"))), + equalTo(2L << 30)); + } + + public void 
testExtractValidHeapSizeUsingMaxHeapSize() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-XX:MaxHeapSize=2g"))), + equalTo(2L << 30)); + } + + public void testExtractValidHeapSizeNoOptionPresent() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.emptyList())), + greaterThan(0L)); + } + + public void testHeapSizeInvalid() throws InterruptedException, IOException { + try { + JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx2Z"))); + fail("expected starting java to fail"); + } catch (final RuntimeException e) { + assertThat(e, hasToString(containsString(("starting java failed")))); + assertThat(e, hasToString(containsString(("Invalid maximum heap size: -Xmx2Z")))); + } } - public void testExtractInvalidHeapSize() { + public void testHeapSizeTooSmall() throws InterruptedException, IOException { try { - JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx2T")); - fail("Expected IllegalArgumentException to be raised"); - } catch (IllegalArgumentException expected) { - assertEquals("Unknown unit [T] for max heap size in [-Xmx2T]", expected.getMessage()); + JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx1024"))); + fail("expected starting java to fail"); + } catch (final RuntimeException e) { + assertThat(e, hasToString(containsString(("starting java failed")))); + assertThat(e, hasToString(containsString(("Too small maximum heap")))); } } - public void testExtractNoHeapSize() { - assertNull("No spaces allowed", JvmErgonomics.extractHeapSize(Collections.singletonList("-Xmx 1024"))); - assertNull("JVM option is not present", JvmErgonomics.extractHeapSize(Collections.singletonList(""))); - assertNull("Multiple JVM options per line", JvmErgonomics.extractHeapSize(Collections.singletonList("-Xms2g -Xmx2g"))); + public void testHeapSizeWithSpace() throws InterruptedException, IOException { + try { + JvmErgonomics.extractHeapSize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx 1024"))); + fail("expected starting java to fail"); + } catch (final RuntimeException e) { + assertThat(e, hasToString(containsString(("starting java failed")))); + assertThat(e, hasToString(containsString(("Invalid maximum heap size: -Xmx 1024")))); + } } public void testExtractSystemProperties() { @@ -69,15 +101,16 @@ public void testExtractNoSystemProperties() { assertTrue(parsedSystemProperties.isEmpty()); } - public void testLittleMemoryErgonomicChoices() { + public void testLittleMemoryErgonomicChoices() throws InterruptedException, IOException { String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); List expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=unpooled"); assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap))); } - public void testPlentyMemoryErgonomicChoices() { + public void testPlentyMemoryErgonomicChoices() throws InterruptedException, IOException { String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); List expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=pooled"); assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap))); } + } From 2a9da80a24b5a1847b2d54217f30a6a4290f3b0a Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 9 May 
2019 12:59:45 +0200 Subject: [PATCH 036/321] Add HTML strip processor (#41888) This processor uses the Lucene HTMLStripCharFilter class to remove HTML entities from a field. It complements the existing char filter, so that there is also the possibility to store the stripped version. Note that the character filter replaces tags with a newline, so the produced text will look slightly different from the incoming HTML with regard to newlines. --- docs/reference/ingest/ingest-node.asciidoc | 1 + .../ingest/processors/html_strip.asciidoc | 26 +++++++ .../ingest/common/HtmlStripProcessor.java | 76 +++++++++++++++++++ .../ingest/common/IngestCommonPlugin.java | 3 +- .../HtmlStripProcessorFactoryTests.java | 27 +++++++ .../common/HtmlStripProcessorTests.java | 38 ++++++++++ .../rest-api-spec/test/ingest/10_basic.yml | 1 + .../rest-api-spec/test/ingest/40_mutate.yml | 9 ++- 8 files changed, 179 insertions(+), 2 deletions(-) create mode 100644 docs/reference/ingest/processors/html_strip.asciidoc create mode 100644 modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java create mode 100644 modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java create mode 100644 modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 3c8d8e9abf2bf..1f8abc5675db9 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -864,6 +864,7 @@ include::processors/foreach.asciidoc[] include::processors/geoip.asciidoc[] include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] +include::processors/html_strip.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[] diff --git a/docs/reference/ingest/processors/html_strip.asciidoc b/docs/reference/ingest/processors/html_strip.asciidoc new file mode 100644 index 0000000000000..2fa3cd7bbb8ae --- /dev/null +++ b/docs/reference/ingest/processors/html_strip.asciidoc @@ -0,0 +1,26 @@ +[[htmlstrip-processor]] +=== HTML Strip Processor +Removes HTML from a field. + +NOTE: Each HTML tag is replaced with a `\n` character. + +[[htmlstrip-options]] +.HTML Strip Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The string-valued field to remove HTML tags from +| `target_field` | no | `field` | The field to assign the value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::common-options.asciidoc[] +|====== + +[source,js] +-------------------------------------------------- +{ + "html_strip": { + "field": "foo" + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java new file mode 100644 index 0000000000000..aaeb5b3310b53 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/HtmlStripProcessor.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter; +import org.elasticsearch.ElasticsearchException; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + +public final class HtmlStripProcessor extends AbstractStringProcessor { + + public static final String TYPE = "html_strip"; + + HtmlStripProcessor(String tag, String field, boolean ignoreMissing, String targetField) { + super(tag, field, ignoreMissing, targetField); + } + + @Override + protected String process(String value) { + // shortcut, no need to create a string builder and go through each char + if (value.contains("<") == false || value.contains(">") == false) { + return value; + } + + HTMLStripCharFilter filter = new HTMLStripCharFilter(new StringReader(value)); + + StringBuilder builder = new StringBuilder(); + int ch; + try { + while ((ch = filter.read()) != -1) { + builder.append((char)ch); + } + } catch (IOException e) { + throw new ElasticsearchException(e); + } + + return builder.toString(); + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory extends AbstractStringProcessor.Factory { + + public Factory() { + super(TYPE); + } + + @Override + protected HtmlStripProcessor newProcessor(String tag, Map config, String field, + boolean ignoreMissing, String targetField) { + return new HtmlStripProcessor(tag, field, ignoreMissing, targetField); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index c3b1328d0b276..f194fe0c3d6c0 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -87,7 +87,8 @@ public Map getProcessors(Processor.Parameters paramet entry(BytesProcessor.TYPE, new BytesProcessor.Factory()), entry(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)), entry(DissectProcessor.TYPE, new DissectProcessor.Factory()), - entry(DropProcessor.TYPE, new DropProcessor.Factory())); + entry(DropProcessor.TYPE, new DropProcessor.Factory()), + entry(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory())); } @Override diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java new file mode 100644 index 0000000000000..ccadcd2770f4a --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorFactoryTests.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +public class HtmlStripProcessorFactoryTests extends AbstractStringProcessorFactoryTestCase { + @Override + protected AbstractStringProcessor.Factory newFactory() { + return new HtmlStripProcessor.Factory(); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java new file mode 100644 index 0000000000000..79ccff84a726e --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/HtmlStripProcessorTests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +public class HtmlStripProcessorTests extends AbstractStringProcessorTestCase { + + @Override + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + return new HtmlStripProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); + } + + @Override + protected String modifyInput(String input) { + return "

<p><b>test</b>" + input + "<p><b>test</b>"; + } + + @Override + protected String expectedResult(String input) { + return "\ntest" + input + "\ntest"; + } +} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml index f83a9e78cb3fe..8a803eae1fc3d 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml @@ -23,6 +23,7 @@ - contains: { nodes.$master.ingest.processors: { type: foreach } } - contains: { nodes.$master.ingest.processors: { type: grok } } - contains: { nodes.$master.ingest.processors: { type: gsub } } + - contains: { nodes.$master.ingest.processors: { type: html_strip } } - contains: { nodes.$master.ingest.processors: { type: join } } - contains: { nodes.$master.ingest.processors: { type: json } } - contains: { nodes.$master.ingest.processors: { type: kv } } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml index 11b6a64cd3fdf..9de9d19c0b879 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/40_mutate.yml @@ -76,6 +76,11 @@ teardown: "pattern" : "-", "replacement" : "." } + }, + { + "html_strip" : { + "field" : "field_to_html_strip" + } } ] } @@ -96,7 +101,8 @@ teardown: "field_to_split": "127-0-0-1", "field_to_join": ["127","0","0","1"], "field_to_convert": ["127","0","0","1"], - "field_to_gsub": "127-0-0-1" + "field_to_gsub": "127-0-0-1", + "field_to_html_strip": "

<p>this <title>is</title> a test" } - do: @@ -114,6 +120,7 @@ teardown: - match: { _source.field_to_join: "127-0-0-1" } - match: { _source.field_to_convert: [127,0,0,1] } - match: { _source.field_to_gsub: "127.0.0.1" } + - match: { _source.field_to_html_strip: "\nthis \nis\n a test" } --- "Test metadata": From f8e0f8a652d6492db155660a2d8d187f831ba941 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Thu, 9 May 2019 13:08:33 +0100 Subject: [PATCH 037/321] Cut AnalyzeResponse over to Writeable (#41915) This commit makes AnalyzeResponse and its various helper classes implement Writeable. The classes are also now immutable. Relates to #34389 --- .../admin/indices/analyze/AnalyzeAction.java | 8 +- .../indices/analyze/AnalyzeResponse.java | 101 +++++------ .../analyze/DetailAnalyzeResponse.java | 165 +++++++----- .../analyze/TransportAnalyzeAction.java | 8 +- .../shard/TransportSingleShardAction.java | 21 ++- .../indices/analyze/AnalyzeResponseTests.java | 26 +-- 6 files changed, 156 insertions(+), 173 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index e2bbd655992de..3677cd6cb4e43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class AnalyzeAction extends Action<AnalyzeResponse> { @@ -30,8 +31,13 @@ private AnalyzeAction() { super(NAME); } + @Override + public Writeable.Reader<AnalyzeResponse> getResponseReader() { + return AnalyzeResponse::new; + } + @Override public AnalyzeResponse newResponse() { - return new AnalyzeResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index e571db951cbc1..945c2128bab39 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,17 +43,14 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContentObject { - public static class AnalyzeToken implements Streamable, ToXContentObject { - private String term; - private int startOffset; - private int endOffset; - private int position; - private int positionLength = 1; - private Map<String, Object> attributes; - private String type; - - AnalyzeToken() { - } + public static class AnalyzeToken implements Writeable, ToXContentObject { + private final String term; + private final int startOffset; + private final int endOffset; + private final int position; + private final int positionLength; + private final Map<String, Object> attributes; + private final 
String type; @Override public boolean equals(Object o) { @@ -85,6 +82,21 @@ public AnalyzeToken(String term, int position, int startOffset, int endOffset, i this.attributes = attributes; } + public AnalyzeToken(StreamInput in) throws IOException { + term = in.readString(); + startOffset = in.readInt(); + endOffset = in.readInt(); + position = in.readVInt(); + Integer len = in.readOptionalVInt(); + if (len != null) { + positionLength = len; + } else { + positionLength = 1; + } + type = in.readOptionalString(); + attributes = in.readMap(); + } + public String getTerm() { return this.term; } @@ -134,12 +146,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException { - AnalyzeToken analyzeToken = new AnalyzeToken(); - analyzeToken.readFrom(in); - return analyzeToken; - } - public static AnalyzeToken fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); String field = null; @@ -184,22 +190,6 @@ public static AnalyzeToken fromXContent(XContentParser parser) throws IOExceptio return new AnalyzeToken(term, position, startOffset, endOffset, positionLength, type, attributes); } - @Override - public void readFrom(StreamInput in) throws IOException { - term = in.readString(); - startOffset = in.readInt(); - endOffset = in.readInt(); - position = in.readVInt(); - Integer len = in.readOptionalVInt(); - if (len != null) { - positionLength = len; - } else { - positionLength = 1; - } - type = in.readOptionalString(); - attributes = in.readMap(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(term); @@ -212,18 +202,35 @@ public void writeTo(StreamOutput out) throws IOException { } } - private DetailAnalyzeResponse detail; - - private List tokens; + private final DetailAnalyzeResponse detail; - AnalyzeResponse() { - } + private final List tokens; public AnalyzeResponse(List tokens, DetailAnalyzeResponse detail) { this.tokens = tokens; this.detail = detail; } + public AnalyzeResponse(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + if (size > 0) { + tokens = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + tokens.add(new AnalyzeToken(in)); + } + } + else { + tokens = null; + } + detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + public List getTokens() { return this.tokens; } @@ -268,20 +275,6 @@ public static AnalyzeResponse fromXContent(XContentParser parser) throws IOExcep return PARSER.parse(parser, null); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(AnalyzeToken.readAnalyzeToken(in)); - } - if (tokens.size() == 0) { - tokens = null; - } - detail = in.readOptionalStreamable(DetailAnalyzeResponse::new); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -293,7 +286,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeVInt(0); } - out.writeOptionalStreamable(detail); + out.writeOptionalWriteable(detail); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java index b44354047be03..1e84d9e0a2e1a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -40,16 +40,13 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { +public class DetailAnalyzeResponse implements Writeable, ToXContentFragment { - private boolean customAnalyzer = false; - private AnalyzeTokenList analyzer; - private CharFilteredText[] charfilters; - private AnalyzeTokenList tokenizer; - private AnalyzeTokenList[] tokenfilters; - - DetailAnalyzeResponse() { - } + private final boolean customAnalyzer; + private final AnalyzeTokenList analyzer; + private final CharFilteredText[] charfilters; + private final AnalyzeTokenList tokenizer; + private final AnalyzeTokenList[] tokenfilters; public DetailAnalyzeResponse(AnalyzeTokenList analyzer) { this(false, analyzer, null, null, null); @@ -71,46 +68,55 @@ public DetailAnalyzeResponse(boolean customAnalyzer, this.tokenfilters = tokenfilters; } - public AnalyzeTokenList analyzer() { - return this.analyzer; + public DetailAnalyzeResponse(StreamInput in) throws IOException { + this.customAnalyzer = in.readBoolean(); + if (customAnalyzer) { + tokenizer = new AnalyzeTokenList(in); + int size = in.readVInt(); + if (size > 0) { + charfilters = new CharFilteredText[size]; + for (int i = 0; i < size; i++) { + charfilters[i] = new CharFilteredText(in); + } + } + else { + charfilters = null; + } + size = in.readVInt(); + if (size > 0) { + tokenfilters = new AnalyzeTokenList[size]; + for (int i = 0; i < size; i++) { + tokenfilters[i] = new AnalyzeTokenList(in); + } + } + else { + tokenfilters = null; + } + analyzer = null; + } else { + analyzer = new AnalyzeTokenList(in); + tokenfilters = null; + tokenizer = null; + charfilters = null; + } } - public DetailAnalyzeResponse analyzer(AnalyzeTokenList analyzer) { - this.customAnalyzer = false; - this.analyzer = analyzer; - return this; + public AnalyzeTokenList analyzer() { + return this.analyzer; } public CharFilteredText[] charfilters() { return this.charfilters; } - public DetailAnalyzeResponse charfilters(CharFilteredText[] charfilters) { - this.customAnalyzer = true; - this.charfilters = charfilters; - return this; - } - public AnalyzeTokenList tokenizer() { return tokenizer; } - public DetailAnalyzeResponse tokenizer(AnalyzeTokenList tokenizer) { - this.customAnalyzer = true; - this.tokenizer = tokenizer; - return this; - } - public AnalyzeTokenList[] tokenfilters() { return tokenfilters; } - public DetailAnalyzeResponse tokenfilters(AnalyzeTokenList[] tokenfilters) { - this.customAnalyzer 
= true; - this.tokenfilters = tokenfilters; - return this; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -201,30 +207,6 @@ static final class Fields { static final String TOKENFILTERS = "tokenfilters"; } - @Override - public void readFrom(StreamInput in) throws IOException { - this.customAnalyzer = in.readBoolean(); - if (customAnalyzer) { - tokenizer = AnalyzeTokenList.readAnalyzeTokenList(in); - int size = in.readVInt(); - if (size > 0) { - charfilters = new CharFilteredText[size]; - for (int i = 0; i < size; i++) { - charfilters[i] = CharFilteredText.readCharFilteredText(in); - } - } - size = in.readVInt(); - if (size > 0) { - tokenfilters = new AnalyzeTokenList[size]; - for (int i = 0; i < size; i++) { - tokenfilters[i] = AnalyzeTokenList.readAnalyzeTokenList(in); - } - } - } else { - analyzer = AnalyzeTokenList.readAnalyzeTokenList(in); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(customAnalyzer); @@ -251,9 +233,9 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static class AnalyzeTokenList implements Streamable, ToXContentObject { - private String name; - private AnalyzeResponse.AnalyzeToken[] tokens; + public static class AnalyzeTokenList implements Writeable, ToXContentObject { + private final String name; + private final AnalyzeResponse.AnalyzeToken[] tokens; @Override public boolean equals(Object o) { @@ -271,14 +253,25 @@ public int hashCode() { return result; } - AnalyzeTokenList() { - } - public AnalyzeTokenList(String name, AnalyzeResponse.AnalyzeToken[] tokens) { this.name = name; this.tokens = tokens; } + public AnalyzeTokenList(StreamInput in) throws IOException { + name = in.readString(); + int size = in.readVInt(); + if (size > 0) { + tokens = new AnalyzeResponse.AnalyzeToken[size]; + for (int i = 0; i < size; i++) { + tokens[i] = new AnalyzeResponse.AnalyzeToken(in); + } + } + else { + tokens = null; + } + } + public String getName() { return name; } @@ -287,12 +280,6 @@ public AnalyzeResponse.AnalyzeToken[] getTokens() { return tokens; } - public static AnalyzeTokenList readAnalyzeTokenList(StreamInput in) throws IOException { - AnalyzeTokenList list = new AnalyzeTokenList(); - list.readFrom(in); - return list; - } - XContentBuilder toXContentWithoutObject(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.NAME, this.name); builder.startArray(AnalyzeResponse.Fields.TOKENS); @@ -327,18 +314,6 @@ public static AnalyzeTokenList fromXContent(XContentParser parser) throws IOExce return PARSER.parse(parser, null); } - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeResponse.AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = AnalyzeResponse.AnalyzeToken.readAnalyzeToken(in); - } - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); @@ -353,12 +328,9 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static class CharFilteredText implements Streamable, ToXContentObject { - private String name; - private String[] texts; - - CharFilteredText() { - } + public static class CharFilteredText implements Writeable, ToXContentObject { + private final String name; + private final String[] texts; public CharFilteredText(String name, String[] texts) { this.name = name; @@ -369,6 +341,11 @@ public CharFilteredText(String name, String[] texts) { } 
} + public CharFilteredText(StreamInput in) throws IOException { + name = in.readString(); + texts = in.readStringArray(); + } + public String getName() { return name; } @@ -398,18 +375,6 @@ public static CharFilteredText fromXContent(XContentParser parser) throws IOExce return PARSER.parse(parser, null); } - public static CharFilteredText readCharFilteredText(StreamInput in) throws IOException { - CharFilteredText text = new CharFilteredText(); - text.readFrom(in); - return text; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - texts = in.readStringArray(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 9538bd4b4d22c..07f445b6fc74a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; @@ -97,7 +98,12 @@ public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterS @Override protected AnalyzeResponse newResponse() { - return new AnalyzeResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + protected Writeable.Reader getResponseReader() { + return AnalyzeResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 6d7ad085dcd1e..8b0e69bd457c8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.shard.ShardId; @@ -118,8 +119,18 @@ protected void doRun() throws Exception { } }); } + + @Deprecated protected abstract Response newResponse(); + protected Writeable.Reader getResponseReader() { + return in -> { + Response response = newResponse(); + response.readFrom(in); + return response; + }; + } + protected abstract boolean resolveIndex(Request request); protected ClusterBlockException checkGlobalBlock(ClusterState state) { @@ -182,13 +193,12 @@ private AsyncSingleAction(Request request, ActionListener listener) { public void start() { if (shardIt == null) { // just execute it on the local node + final Writeable.Reader reader = getResponseReader(); transportService.sendRequest(clusterService.localNode(), transportShardAction, internalRequest.request(), new 
TransportResponseHandler() { @Override public Response read(StreamInput in) throws IOException { - Response response = newResponse(); - response.readFrom(in); - return response; + return reader.read(in); } @Override @@ -251,14 +261,13 @@ private void perform(@Nullable final Exception currentFailure) { node ); } + final Writeable.Reader reader = getResponseReader(); transportService.sendRequest(node, transportShardAction, internalRequest.request(), new TransportResponseHandler() { @Override public Response read(StreamInput in) throws IOException { - Response response = newResponse(); - response.readFrom(in); - return response; + return reader.read(in); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java index 7f1b7fb41ba18..a4cee7a4cde2a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java @@ -20,12 +20,13 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.ArrayList; @@ -37,7 +38,7 @@ import static org.hamcrest.Matchers.equalTo; -public class AnalyzeResponseTests extends AbstractStreamableXContentTestCase { +public class AnalyzeResponseTests extends AbstractSerializingTestCase { @Override protected Predicate getRandomFieldsExcludeFilter() { @@ -50,8 +51,8 @@ protected AnalyzeResponse doParseInstance(XContentParser parser) throws IOExcept } @Override - protected AnalyzeResponse createBlankInstance() { - return new AnalyzeResponse(); + protected Writeable.Reader instanceReader() { + return AnalyzeResponse::new; } @Override @@ -61,21 +62,24 @@ protected AnalyzeResponse createTestInstance() { for (int i = 0; i < tokenCount; i++) { tokens[i] = randomToken(); } - DetailAnalyzeResponse dar = null; if (randomBoolean()) { - dar = new DetailAnalyzeResponse(); + DetailAnalyzeResponse.CharFilteredText[] charfilters = null; + DetailAnalyzeResponse.AnalyzeTokenList[] tokenfilters = null; if (randomBoolean()) { - dar.charfilters(new DetailAnalyzeResponse.CharFilteredText[]{ + charfilters = new DetailAnalyzeResponse.CharFilteredText[]{ new DetailAnalyzeResponse.CharFilteredText("my_charfilter", new String[]{"one two"}) - }); + }; } - dar.tokenizer(new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenizer", tokens)); if (randomBoolean()) { - dar.tokenfilters(new DetailAnalyzeResponse.AnalyzeTokenList[]{ + tokenfilters = new DetailAnalyzeResponse.AnalyzeTokenList[]{ new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenfilter_1", tokens), new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenfilter_2", tokens) - }); + }; } + DetailAnalyzeResponse dar = new DetailAnalyzeResponse( + charfilters, + new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenizer", tokens), + tokenfilters); return new AnalyzeResponse(null, dar); } return new 
AnalyzeResponse(Arrays.asList(tokens), null); From 44fa35a75f1e75fe805c2e5e969656bbc23715e7 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 9 May 2019 14:14:09 +0200 Subject: [PATCH 038/321] Remove Version.V_6_0_0 constant from PercolatorMatchedSlotSubFetchPhase (#41995) and use Version.CURRENT instead of passing down index created version --- .../PercolatorMatchedSlotSubFetchPhase.java | 11 +++++------ .../PercolatorMatchedSlotSubFetchPhaseTests.java | 5 +++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index fdcc9156b415e..5bdeef8a7b1b4 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -61,7 +61,9 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept innerHitsExecute(context.query(), context.searcher(), hits); } - static void innerHitsExecute(Query mainQuery, IndexSearcher indexSearcher, SearchHit[] hits) throws IOException { + static void innerHitsExecute(Query mainQuery, + IndexSearcher indexSearcher, + SearchHit[] hits) throws IOException { List percolateQueries = locatePercolatorQuery(mainQuery); if (percolateQueries.isEmpty()) { return; @@ -71,11 +73,8 @@ static void innerHitsExecute(Query mainQuery, IndexSearcher indexSearcher, Searc for (PercolateQuery percolateQuery : percolateQueries) { String fieldName = singlePercolateQuery ? FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName(); IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher(); - // there is a bug in lucene's MemoryIndex that doesn't allow us to use docValues here... 
- // See https://issues.apache.org/jira/browse/LUCENE-8055 - // for now we just use version 6.0 version to find nested parent - final Version version = Version.V_6_0_0; //context.mapperService().getIndexSettings().getIndexVersionCreated(); - Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter(version)), + Query nonNestedQuery = Queries.newNonNestedFilter(Version.CURRENT); + Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(nonNestedQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0)); int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index 89356bf274d8d..7d81df9323fbc 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -20,6 +20,7 @@ import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -33,6 +34,7 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESTestCase; @@ -58,6 +60,7 @@ public void testHitsExecute() throws Exception { PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); + memoryIndex.addField(new NumericDocValuesField(SeqNoFieldMapper.PRIMARY_TERM_NAME, 0), null); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); @@ -72,6 +75,7 @@ public void testHitsExecute() throws Exception { PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); + memoryIndex.addField(new NumericDocValuesField(SeqNoFieldMapper.PRIMARY_TERM_NAME, 0), null); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); @@ -85,6 +89,7 @@ public void testHitsExecute() throws Exception { PercolateQuery.QueryStore queryStore = ctx -> docId -> null; MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); + memoryIndex.addField(new NumericDocValuesField(SeqNoFieldMapper.PRIMARY_TERM_NAME, 0), null); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); From 6fe07e6bc9c89ced69f33d984876a8e65ff97ad0 Mon Sep 17 00:00:00 2001 From: David Kyle Date: 
Thu, 9 May 2019 13:16:13 +0100 Subject: [PATCH 039/321] Mute ApiKeyIntegTests See https://github.com/elastic/elasticsearch/issues/41747 --- .../org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index f6849cae4c1cd..c8cea45037942 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -315,6 +315,7 @@ private Client waitForExpiredApiKeysRemoverTriggerReadyAndGetClient() throws Exc return internalCluster().client(nodeWithMostRecentRun); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/41747") public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() throws Exception { Client client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader( Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, From 166a921d79a23386116bd41e92054c5312528b5f Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 9 May 2019 14:25:07 +0200 Subject: [PATCH 040/321] Fix IAE on cross_fields query introduced in 7.0.1 (#41938) If the max doc in the index is greater than the minimum total term frequency among the requested fields, we need to adjust max doc to be equal to the min ttf. This adjustment was removed by mistake when fixing #41125. Closes #41934 --- .../lucene/queries/BlendedTermQuery.java | 4 ++- .../lucene/queries/BlendedTermQueryTests.java | 30 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 1700979c32d64..c696d476bbb43 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -118,7 +118,9 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader // otherwise the statistics don't match minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field())); } - + } + if (maxDoc > minSumTTF) { + maxDoc = (int)minSumTTF; } if (max == 0) { return; // we are done that term doesn't exist at all diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index ce33c247a3337..9d05e119cbb78 100644 --- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -227,4 +227,34 @@ public void testExtractTerms() throws IOException { assertThat(extracted.size(), equalTo(terms.size())); assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0]))); } + + public void testMinTTF() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); + FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); + ft.freeze(); + + for (int i = 0; i < 10; i++) { + Document d = new Document(); + d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); + d.add(new Field("dense", "foo foo foo", ft)); + 
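// Only the first document (i == 0) satisfies i % 10 == 0, so the "sparse" field + // appears in a single document: its sum total term frequency (1) is far below + // maxDoc (10), which exercises the maxDoc > minSumTTF clamp restored above. + 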
if (i % 10 == 0) { + d.add(new Field("sparse", "foo", ft)); + } + w.addDocument(d); + } + w.commit(); + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = setSimilarity(newSearcher(reader)); + { + String[] fields = new String[]{"dense", "sparse"}; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f); + TopDocs search = searcher.search(query, 10); + ScoreDoc[] scoreDocs = search.scoreDocs; + assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue()); + } + reader.close(); + w.close(); + dir.close(); + } } From ad8591aecaabed66e23e7f651d4d452af1bf5616 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 9 May 2019 13:46:59 +0100 Subject: [PATCH 041/321] Mute IndexStatsIT#testFilterCacheStats See https://github.com/elastic/elasticsearch/issues/32506 --- .../test/java/org/elasticsearch/indices/stats/IndexStatsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 5fb67a64d9db5..a3697af50b0b6 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -1009,6 +1009,7 @@ private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) { } @TestLogging("_root:DEBUG") // this fails at a very low rate on CI: https://github.com/elastic/elasticsearch/issues/32506 + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); assertAcked(prepareCreate("index").setSettings(settings).get()); From 5f5bbcd57648d0b6a0ac2ab662a794af486f9348 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 9 May 2019 14:53:03 +0200 Subject: [PATCH 042/321] Remove old version constants from QueryAnalyzer (#41996) --- .../percolator/QueryAnalyzer.java | 264 +++++------------- .../percolator/QueryAnalyzerTests.java | 261 +---------------- 2 files changed, 67 insertions(+), 458 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 38c6057e1866f..c245e2cb3a20b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -206,20 +206,8 @@ private static BiFunction phraseQuery() { return new Result(true, Collections.emptySet(), 0); } - if (version.onOrAfter(Version.V_6_1_0)) { - Set extractions = Arrays.stream(terms).map(QueryExtraction::new).collect(toSet()); - return new Result(false, extractions, extractions.size()); - } else { - // the longest term is likely to be the rarest, - // so from a performance perspective it makes sense to extract that - Term longestTerm = terms[0]; - for (Term term : terms) { - if (longestTerm.bytes().length < term.bytes().length) { - longestTerm = term; - } - } - return new Result(false, Collections.singleton(new QueryExtraction(longestTerm)), 1); - } + Set extractions = Arrays.stream(terms).map(QueryExtraction::new).collect(toSet()); + return new Result(false, extractions, extractions.size()); }; } @@ -255,23 +243,14 @@ private static BiFunction spanTermQuery() { private static BiFunction spanNearQuery() 
{ return (query, version) -> { SpanNearQuery spanNearQuery = (SpanNearQuery) query; - if (version.onOrAfter(Version.V_6_1_0)) { - // This has the same problem as boolean queries when it comes to duplicated clauses - // so we rewrite to a boolean query to keep things simple. - BooleanQuery.Builder builder = new BooleanQuery.Builder(); - for (SpanQuery clause : spanNearQuery.getClauses()) { - builder.add(clause, Occur.FILTER); - } - // make sure to unverify the result - return booleanQuery().apply(builder.build(), version).unverify(); - } else { - Result bestClause = null; - for (SpanQuery clause : spanNearQuery.getClauses()) { - Result temp = analyze(clause, version); - bestClause = selectBestResult(temp, bestClause); - } - return bestClause; + // This has the same problem as boolean queries when it comes to duplicated clauses + // so we rewrite to a boolean query to keep things simple. + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (SpanQuery clause : spanNearQuery.getClauses()) { + builder.add(clause, Occur.FILTER); } + // make sure to unverify the result + return booleanQuery().apply(builder.build(), version).unverify(); }; } @@ -478,77 +457,69 @@ private static Result handleConjunction(List conjunctions, Version versi if (conjunctions.isEmpty()) { throw new IllegalArgumentException("Must have at least on conjunction sub result"); } - if (version.onOrAfter(Version.V_6_1_0)) { - for (Result subResult : conjunctions) { - if (subResult.isMatchNoDocs()) { - return subResult; - } + for (Result subResult : conjunctions) { + if (subResult.isMatchNoDocs()) { + return subResult; } - int msm = 0; - boolean verified = true; - boolean matchAllDocs = true; - boolean hasDuplicateTerms = false; - Set extractions = new HashSet<>(); - Set seenRangeFields = new HashSet<>(); - for (Result result : conjunctions) { - // In case that there are duplicate query extractions we need to be careful with - // incrementing msm, - // because that could lead to valid matches not becoming candidate matches: - // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3) - // doc: field: val1 val2 val3 - // So lets be protective and decrease the msm: - int resultMsm = result.minimumShouldMatch; - for (QueryExtraction queryExtraction : result.extractions) { - if (queryExtraction.range != null) { - // In case of range queries each extraction does not simply increment the - // minimum_should_match - // for that percolator query like for a term based extraction, so that can lead - // to more false - // positives for percolator queries with range queries than term based queries. - // The is because the way number fields are extracted from the document to be - // percolated. - // Per field a single range is extracted and if a percolator query has two or - // more range queries - // on the same field, then the minimum should match can be higher than clauses - // in the CoveringQuery. - // Therefore right now the minimum should match is incremented once per number - // field when processing - // the percolator query at index time. 
- if (seenRangeFields.add(queryExtraction.range.fieldName)) { - resultMsm = 1; - } else { - resultMsm = 0; - } - } - - if (extractions.contains(queryExtraction)) { - + } + int msm = 0; + boolean verified = true; + boolean matchAllDocs = true; + boolean hasDuplicateTerms = false; + Set extractions = new HashSet<>(); + Set seenRangeFields = new HashSet<>(); + for (Result result : conjunctions) { + // In case that there are duplicate query extractions we need to be careful with + // incrementing msm, + // because that could lead to valid matches not becoming candidate matches: + // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3) + // doc: field: val1 val2 val3 + // So lets be protective and decrease the msm: + int resultMsm = result.minimumShouldMatch; + for (QueryExtraction queryExtraction : result.extractions) { + if (queryExtraction.range != null) { + // In case of range queries each extraction does not simply increment the + // minimum_should_match + // for that percolator query like for a term based extraction, so that can lead + // to more false + // positives for percolator queries with range queries than term based queries. + // The is because the way number fields are extracted from the document to be + // percolated. + // Per field a single range is extracted and if a percolator query has two or + // more range queries + // on the same field, then the minimum should match can be higher than clauses + // in the CoveringQuery. + // Therefore right now the minimum should match is incremented once per number + // field when processing + // the percolator query at index time. + if (seenRangeFields.add(queryExtraction.range.fieldName)) { + resultMsm = 1; + } else { resultMsm = 0; - verified = false; - break; } } - msm += resultMsm; - if (result.verified == false - // If some inner extractions are optional, the result can't be verified - || result.minimumShouldMatch < result.extractions.size()) { + if (extractions.contains(queryExtraction)) { + + resultMsm = 0; verified = false; + break; } - matchAllDocs &= result.matchAllDocs; - extractions.addAll(result.extractions); } - if (matchAllDocs) { - return new Result(matchAllDocs, verified); - } else { - return new Result(verified, extractions, hasDuplicateTerms ? 1 : msm); + msm += resultMsm; + + if (result.verified == false + // If some inner extractions are optional, the result can't be verified + || result.minimumShouldMatch < result.extractions.size()) { + verified = false; } + matchAllDocs &= result.matchAllDocs; + extractions.addAll(result.extractions); + } + if (matchAllDocs) { + return new Result(matchAllDocs, verified); } else { - Result bestClause = null; - for (Result result : conjunctions) { - bestClause = selectBestResult(result, bestClause); - } - return bestClause; + return new Result(verified, extractions, hasDuplicateTerms ? 
1 : msm); } } @@ -565,12 +536,7 @@ private static Result handleDisjunctionQuery(List disjunctions, int requi private static Result handleDisjunction(List disjunctions, int requiredShouldClauses, Version version) { // Keep track of the msm for each clause: List clauses = new ArrayList<>(disjunctions.size()); - boolean verified; - if (version.before(Version.V_6_1_0)) { - verified = requiredShouldClauses <= 1; - } else { - verified = true; - } + boolean verified = true; int numMatchAllClauses = 0; boolean hasRangeExtractions = false; @@ -617,10 +583,9 @@ private static Result handleDisjunction(List disjunctions, int requiredS boolean matchAllDocs = numMatchAllClauses > 0 && numMatchAllClauses >= requiredShouldClauses; int msm = 0; - if (version.onOrAfter(Version.V_6_1_0) && - // Having ranges would mean we need to juggle with the msm and that complicates this logic a lot, - // so for now lets not do it. - hasRangeExtractions == false) { + // Having ranges would mean we need to juggle with the msm and that complicates this logic a lot, + // so for now lets not do it. + if (hasRangeExtractions == false) { // Figure out what the combined msm is for this disjunction: // (sum the lowest required clauses, otherwise we're too strict and queries may not match) clauses = clauses.stream() @@ -648,103 +613,6 @@ private static Result handleDisjunction(List disjunctions, int requiredS } } - /** - * Return an extraction for the conjunction of {@code result1} and {@code result2} - * by picking up clauses that look most restrictive and making it unverified if - * the other clause is not null and doesn't match all documents. This is used by - * 6.0.0 indices which didn't use the terms_set query. - */ - static Result selectBestResult(Result result1, Result result2) { - assert result1 != null || result2 != null; - if (result1 == null) { - return result2; - } else if (result2 == null) { - return result1; - } else if (result1.matchAllDocs) { // conjunction with match_all - Result result = result2; - if (result1.verified == false) { - result = result.unverify(); - } - return result; - } else if (result2.matchAllDocs) { // conjunction with match_all - Result result = result1; - if (result2.verified == false) { - result = result.unverify(); - } - return result; - } else { - // Prefer term based extractions over range based extractions: - boolean onlyRangeBasedExtractions = true; - for (QueryExtraction clause : result1.extractions) { - if (clause.term != null) { - onlyRangeBasedExtractions = false; - break; - } - } - for (QueryExtraction clause : result2.extractions) { - if (clause.term != null) { - onlyRangeBasedExtractions = false; - break; - } - } - - if (onlyRangeBasedExtractions) { - BytesRef extraction1SmallestRange = smallestRange(result1.extractions); - BytesRef extraction2SmallestRange = smallestRange(result2.extractions); - if (extraction1SmallestRange == null) { - return result2.unverify(); - } else if (extraction2SmallestRange == null) { - return result1.unverify(); - } - - // Keep the clause with smallest range, this is likely to be the rarest. - if (extraction1SmallestRange.compareTo(extraction2SmallestRange) <= 0) { - return result1.unverify(); - } else { - return result2.unverify(); - } - } else { - int extraction1ShortestTerm = minTermLength(result1.extractions); - int extraction2ShortestTerm = minTermLength(result2.extractions); - // keep the clause with longest terms, this likely to be rarest. 
- if (extraction1ShortestTerm >= extraction2ShortestTerm) { - return result1.unverify(); - } else { - return result2.unverify(); - } - } - } - } - - private static int minTermLength(Set extractions) { - // In case there are only range extractions, then we return Integer.MIN_VALUE, - // so that selectBestExtraction(...) we are likely to prefer the extractions that contains at least a single extraction - if (extractions.stream().filter(queryExtraction -> queryExtraction.term != null).count() == 0 && - extractions.stream().filter(queryExtraction -> queryExtraction.range != null).count() > 0) { - return Integer.MIN_VALUE; - } - - int min = Integer.MAX_VALUE; - for (QueryExtraction qt : extractions) { - if (qt.term != null) { - min = Math.min(min, qt.bytes().length); - } - } - return min; - } - - private static BytesRef smallestRange(Set terms) { - BytesRef min = null; - for (QueryExtraction qt : terms) { - if (qt.range != null) { - if (min == null || qt.range.interval.compareTo(min) < 0) { - min = qt.range.interval; - } - } - } - return min; - } - /** * Query extraction result. A result is a candidate for a given document either if: * - `matchAllDocs` is true diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 712d5688827f2..544cfc6ef6193 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -75,7 +75,6 @@ import static org.elasticsearch.percolator.QueryAnalyzer.UnsupportedQueryException; import static org.elasticsearch.percolator.QueryAnalyzer.analyze; -import static org.elasticsearch.percolator.QueryAnalyzer.selectBestResult; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; @@ -148,22 +147,6 @@ public void testExtractQueryMetadata_multiPhraseQuery() { assertThat(terms.get(5).bytes().utf8ToString(), equalTo("_term6")); } - public void testExtractQueryMetadata_multiPhraseQuery_pre6dot1() { - MultiPhraseQuery multiPhraseQuery = new MultiPhraseQuery.Builder() - .add(new Term("_field", "_long_term")) - .add(new Term[] {new Term("_field", "_long_term"), new Term("_field", "_term")}) - .add(new Term[] {new Term("_field", "_long_term"), new Term("_field", "_very_long_term")}) - .add(new Term[] {new Term("_field", "_very_long_term")}) - .build(); - Result result = analyze(multiPhraseQuery, Version.V_6_0_0); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - List terms = new ArrayList<>(result.extractions); - assertThat(terms.size(), equalTo(1)); - assertThat(terms.get(0).field(), equalTo("_field")); - assertThat(terms.get(0).bytes().utf8ToString(), equalTo("_very_long_term")); - } - public void testExtractQueryMetadata_multiPhraseQuery_dups() { MultiPhraseQuery multiPhraseQuery = new MultiPhraseQuery.Builder() .add(new Term("_field", "_term1")) @@ -211,35 +194,6 @@ public void testExtractQueryMetadata_booleanQuery() { assertThat(terms.get(4).bytes(), equalTo(termQuery3.getTerm().bytes())); } - public void testExtractQueryMetadata_booleanQuery_pre6dot1() { - BooleanQuery.Builder builder = new BooleanQuery.Builder(); - TermQuery termQuery1 = new TermQuery(new Term("_field", "_term")); - builder.add(termQuery1, BooleanClause.Occur.SHOULD); - PhraseQuery phraseQuery = new PhraseQuery("_field", 
"_term1", "term2"); - builder.add(phraseQuery, BooleanClause.Occur.SHOULD); - - BooleanQuery.Builder subBuilder = new BooleanQuery.Builder(); - TermQuery termQuery2 = new TermQuery(new Term("_field1", "_term")); - subBuilder.add(termQuery2, BooleanClause.Occur.MUST); - TermQuery termQuery3 = new TermQuery(new Term("_field3", "_long_term")); - subBuilder.add(termQuery3, BooleanClause.Occur.MUST); - builder.add(subBuilder.build(), BooleanClause.Occur.SHOULD); - - BooleanQuery booleanQuery = builder.build(); - Result result = analyze(booleanQuery, Version.V_6_0_0); - assertThat("Should clause with phrase query isn't verified, so entire query can't be verified", result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - List terms = new ArrayList<>(result.extractions); - terms.sort(Comparator.comparing(qt -> qt.term)); - assertThat(terms.size(), equalTo(3)); - assertThat(terms.get(0).field(), equalTo(termQuery1.getTerm().field())); - assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); - assertThat(terms.get(1).field(), equalTo(phraseQuery.getTerms()[0].field())); - assertThat(terms.get(1).bytes(), equalTo(phraseQuery.getTerms()[0].bytes())); - assertThat(terms.get(2).field(), equalTo(termQuery3.getTerm().field())); - assertThat(terms.get(2).bytes(), equalTo(termQuery3.getTerm().bytes())); - } - public void testExtractQueryMetadata_booleanQuery_msm() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.setMinimumNumberShouldMatch(2); @@ -326,28 +280,6 @@ public void testExtractQueryMetadata_booleanQuery_msm() { assertFalse(result.verified); } - public void testExtractQueryMetadata_booleanQuery_msm_pre6dot1() { - BooleanQuery.Builder builder = new BooleanQuery.Builder(); - builder.setMinimumNumberShouldMatch(2); - TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); - builder.add(termQuery1, BooleanClause.Occur.SHOULD); - TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2")); - builder.add(termQuery2, BooleanClause.Occur.SHOULD); - TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); - builder.add(termQuery3, BooleanClause.Occur.SHOULD); - - BooleanQuery booleanQuery = builder.build(); - Result result = analyze(booleanQuery, Version.V_6_0_0); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - List extractions = new ArrayList<>(result.extractions); - extractions.sort(Comparator.comparing(extraction -> extraction.term)); - assertThat(extractions.size(), equalTo(3)); - assertThat(extractions.get(0).term, equalTo(new Term("_field", "_term1"))); - assertThat(extractions.get(1).term, equalTo(new Term("_field", "_term2"))); - assertThat(extractions.get(2).term, equalTo(new Term("_field", "_term3"))); - } - public void testExtractQueryMetadata_booleanQuery_onlyShould() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); @@ -402,7 +334,7 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() { assertThat(result.minimumShouldMatch, equalTo(0)); assertTermsEqual(result.extractions); - result = analyze(booleanQuery, Version.V_6_0_0); + result = analyze(booleanQuery, Version.CURRENT); assertThat(result.matchAllDocs, is(true)); assertThat(result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(0)); @@ -657,18 +589,6 @@ public void testExtractQueryMetadata_spanNearQuery() { assertTermsEqual(result.extractions, spanTermQuery1.getTerm(), 
spanTermQuery2.getTerm()); } - public void testExtractQueryMetadata_spanNearQuery_pre6dot1() { - SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term")); - SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term")); - SpanNearQuery spanNearQuery = new SpanNearQuery.Builder("_field", true) - .addClause(spanTermQuery1).addClause(spanTermQuery2).build(); - - Result result = analyze(spanNearQuery, Version.V_6_0_0); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertTermsEqual(result.extractions, spanTermQuery2.getTerm()); - } - public void testExtractQueryMetadata_spanOrQuery() { SpanTermQuery spanTermQuery1 = new SpanTermQuery(new Term("_field", "_short_term")); SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term")); @@ -966,138 +886,6 @@ public void testFunctionScoreQuery_withMatchAll() { assertThat(result.extractions.isEmpty(), is(true)); } - public void testSelectBestResult() { - Set queryTerms1 = terms(new int[0], "12", "1234", "12345"); - Result result1 = new Result(true, queryTerms1, 1); - Set queryTerms2 = terms(new int[0], "123", "1234", "12345"); - Result result2 = new Result(true, queryTerms2, 1); - Result result = selectBestResult(result1, result2); - assertSame(queryTerms2, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{1, 2, 3}); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{2, 3, 4}); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame(queryTerms1, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{4, 5, 6}); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{1, 2, 3}); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame(queryTerms2, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{1, 2, 3}, "123", "456"); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{2, 3, 4}, "123", "456"); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame(queryTerms1, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{10}); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{1}); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame(queryTerms2, result.extractions); - - queryTerms1 = terms(new int[]{10}, "123"); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{1}); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame(queryTerms1, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{10}, "1", "123"); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{1}, "1", "2"); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame(queryTerms1, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{1, 2, 3}, "123", "456"); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{2, 3, 4}, "1", "456"); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame("Ignoring ranges, so then prefer queryTerms1, because it has 
the longest shortest term", - queryTerms1, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{}); - result1 = new Result(false, queryTerms1, 0); - queryTerms2 = terms(new int[]{}); - result2 = new Result(false, queryTerms2, 0); - result = selectBestResult(result1, result2); - assertSame("In case query extractions are empty", queryTerms2, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{1}); - result1 = new Result(true, queryTerms1, 1); - queryTerms2 = terms(new int[]{}); - result2 = new Result(false, queryTerms2, 0); - result = selectBestResult(result1, result2); - assertSame("In case query a single extraction is empty", queryTerms1, result.extractions); - assertFalse(result.verified); - - queryTerms1 = terms(new int[]{}); - result1 = new Result(false, queryTerms1, 0); - queryTerms2 = terms(new int[]{1}); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame("In case query a single extraction is empty", queryTerms2, result.extractions); - assertFalse(result.verified); - - result1 = new Result(true, true); - queryTerms2 = terms(new int[]{1}); - result2 = new Result(true, queryTerms2, 1); - result = selectBestResult(result1, result2); - assertSame("Conjunction with a match_all", result2, result); - assertTrue(result.verified); - - queryTerms1 = terms(new int[]{1}); - result1 = new Result(true, queryTerms2, 1); - result2 = new Result(true, true); - result = selectBestResult(result1, result2); - assertSame("Conjunction with a match_all", result1, result); - assertTrue(result.verified); - } - - public void testselectBestResult_random() { - Set terms1 = new HashSet<>(); - int shortestTerms1Length = Integer.MAX_VALUE; - int sumTermLength = randomIntBetween(1, 128); - while (sumTermLength > 0) { - int length = randomInt(sumTermLength); - shortestTerms1Length = Math.min(shortestTerms1Length, length); - terms1.add(new QueryExtraction(new Term("field", randomAlphaOfLength(length)))); - sumTermLength -= length; - } - - Set terms2 = new HashSet<>(); - int shortestTerms2Length = Integer.MAX_VALUE; - sumTermLength = randomIntBetween(1, 128); - while (sumTermLength > 0) { - int length = randomInt(sumTermLength); - shortestTerms2Length = Math.min(shortestTerms2Length, length); - terms2.add(new QueryExtraction(new Term("field", randomAlphaOfLength(length)))); - sumTermLength -= length; - } - - Result result1 = new Result(true, terms1, 1); - Result result2 = new Result(true, terms2, 1); - Result result = selectBestResult(result1, result2); - Set expected = shortestTerms1Length >= shortestTerms2Length ? 
terms1 : terms2; - assertThat(result.extractions, sameInstance(expected)); - } - public void testPointRangeQuery() { // int ranges get converted to long ranges: Query query = IntPoint.newRangeQuery("_field", 10, 20); @@ -1213,53 +1001,6 @@ public void testToParentBlockJoinQuery() { assertEquals(new Term("field", "value"), result.extractions.toArray(new QueryExtraction[0])[0].term); } - public void testPointRangeQuerySelectShortestRange() { - BooleanQuery.Builder boolQuery = new BooleanQuery.Builder(); - boolQuery.add(LongPoint.newRangeQuery("_field1", 10, 20), BooleanClause.Occur.FILTER); - boolQuery.add(LongPoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.FILTER); - Result result = analyze(boolQuery.build(), Version.V_6_0_0); - assertFalse(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertEquals(1, result.extractions.size()); - assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName); - - boolQuery = new BooleanQuery.Builder(); - boolQuery.add(LongPoint.newRangeQuery("_field1", 10, 20), BooleanClause.Occur.FILTER); - boolQuery.add(IntPoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.FILTER); - result = analyze(boolQuery.build(), Version.V_6_0_0); - assertFalse(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertEquals(1, result.extractions.size()); - assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName); - - boolQuery = new BooleanQuery.Builder(); - boolQuery.add(DoublePoint.newRangeQuery("_field1", 10, 20), BooleanClause.Occur.FILTER); - boolQuery.add(DoublePoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.FILTER); - result = analyze(boolQuery.build(), Version.V_6_0_0); - assertFalse(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertEquals(1, result.extractions.size()); - assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName); - - boolQuery = new BooleanQuery.Builder(); - boolQuery.add(DoublePoint.newRangeQuery("_field1", 10, 20), BooleanClause.Occur.FILTER); - boolQuery.add(FloatPoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.FILTER); - result = analyze(boolQuery.build(), Version.V_6_0_0); - assertFalse(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertEquals(1, result.extractions.size()); - assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName); - - boolQuery = new BooleanQuery.Builder(); - boolQuery.add(HalfFloatPoint.newRangeQuery("_field1", 10, 20), BooleanClause.Occur.FILTER); - boolQuery.add(HalfFloatPoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.FILTER); - result = analyze(boolQuery.build(), Version.V_6_0_0); - assertFalse(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertEquals(1, result.extractions.size()); - assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName); - } - public void testPointRangeQuerySelectRanges() { BooleanQuery.Builder boolQuery = new BooleanQuery.Builder(); boolQuery.add(LongPoint.newRangeQuery("_field1", 10, 20), BooleanClause.Occur.SHOULD); From 458d94220307451bd34425e7fcd6181140d0bb46 Mon Sep 17 00:00:00 2001 From: Daniel Schneiter Date: Thu, 9 May 2019 15:06:10 +0200 Subject: [PATCH 043/321] Mentioned the name of the icu_analyzer --- docs/plugins/analysis-icu.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index 
b6299139992d1..69d741fa79a90 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -29,7 +29,7 @@ include::install_remove.asciidoc[] [[analysis-icu-analyzer]] ==== ICU Analyzer -Performs basic normalization, tokenization and character folding, using the +The `icu_analyzer` analyzer performs basic normalization, tokenization and character folding, using the `icu_normalizer` char filter, `icu_tokenizer` and `icu_normalizer` token filter The following parameters are accepted: From 3647d7c94e9c893631e7efec096d0422f4f6f30a Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 9 May 2019 14:41:11 +0100 Subject: [PATCH 044/321] Removes typed REST endpoints from search and related APIs (#41640) --- .../migration/migrate_8_0/reindex.asciidoc | 7 +- .../migration/migrate_8_0/search.asciidoc | 10 ++ .../RestMultiSearchTemplateAction.java | 18 ---- .../mustache/RestSearchTemplateAction.java | 4 - .../RestMultiSearchTemplateActionTests.java | 66 ------------ .../RestSearchTemplateActionTests.java | 61 ----------- .../reindex/RestDeleteByQueryAction.java | 1 - .../reindex/RestUpdateByQueryAction.java | 1 - .../reindex/RestDeleteByQueryActionTests.java | 64 ----------- .../reindex/RestUpdateByQueryActionTests.java | 65 ----------- .../rest-api-spec/api/delete_by_query.json | 11 -- .../resources/rest-api-spec/api/msearch.json | 11 -- .../rest-api-spec/api/msearch_template.json | 11 -- .../rest-api-spec/api/mtermvectors.json | 11 -- .../resources/rest-api-spec/api/search.json | 11 -- .../rest-api-spec/api/search_template.json | 11 -- .../rest-api-spec/api/termvectors.json | 17 --- .../rest-api-spec/api/update_by_query.json | 11 -- .../test/mtermvectors/11_basic_with_types.yml | 86 --------------- .../mtermvectors/21_deprecated_with_types.yml | 51 --------- .../mtermvectors/30_mix_typeless_typeful.yml | 29 ----- .../test/termvectors/11_basic_with_types.yml | 36 ------- .../termvectors/21_issue7121_with_types.yml | 42 -------- .../termvectors/31_realtime_with_types.yml | 40 ------- .../termvectors/50_mix_typeless_typeful.yml | 42 -------- .../document/RestMultiTermVectorsAction.java | 18 +--- .../document/RestTermVectorsAction.java | 23 +--- .../action/search/RestMultiSearchAction.java | 21 +--- .../rest/action/search/RestSearchAction.java | 14 --- .../RestMultiTermVectorsActionTests.java | 87 --------------- .../document/RestTermVectorsActionTests.java | 67 ------------ .../search/RestMultiSearchActionTests.java | 66 ------------ .../action/search/RestSearchActionTests.java | 61 ----------- .../integration/IndexPrivilegeTests.java | 102 +++++++++--------- 34 files changed, 72 insertions(+), 1104 deletions(-) create mode 100644 docs/reference/migration/migrate_8_0/search.asciidoc delete mode 100644 modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java delete mode 100644 modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java delete mode 100644 modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestDeleteByQueryActionTests.java delete mode 100644 modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestUpdateByQueryActionTests.java delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/11_basic_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/21_deprecated_with_types.yml delete mode 100644 
rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/11_basic_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/21_issue7121_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/31_realtime_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml delete mode 100644 server/src/test/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsActionTests.java delete mode 100644 server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java delete mode 100644 server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java delete mode 100644 server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java diff --git a/docs/reference/migration/migrate_8_0/reindex.asciidoc b/docs/reference/migration/migrate_8_0/reindex.asciidoc index ebba0f2aebe1d..ef4f5aed147ca 100644 --- a/docs/reference/migration/migrate_8_0/reindex.asciidoc +++ b/docs/reference/migration/migrate_8_0/reindex.asciidoc @@ -7,4 +7,9 @@ re-encode them when generating the search request for the remote host. This leniency has been removed such that all index-names are correctly encoded when reindex generates remote search requests. -Instead, please specify the index-name without any encoding. \ No newline at end of file +Instead, please specify the index-name without any encoding. + +[float] +==== Removal of types + +The `/{index}/{type}/_delete_by_query` and `/{index}/{type}/_update_by_query` REST endpoints have been removed in favour of `/{index}/_delete_by_query` and `/{index}/_update_by_query`. Since indexes no longer contain types, these typed endpoints are obsolete. \ No newline at end of file diff --git a/docs/reference/migration/migrate_8_0/search.asciidoc b/docs/reference/migration/migrate_8_0/search.asciidoc new file mode 100644 index 0000000000000..82886d35bc6a5 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/search.asciidoc @@ -0,0 +1,10 @@ +[float] +[[breaking_80_search_changes]] +=== Search Changes + +[float] +==== Removal of types + +The `/{index}/{type}/_search`, `/{index}/{type}/_msearch`, `/{index}/{type}/_search/template` and `/{index}/{type}/_msearch/template` REST endpoints have been removed in favour of `/{index}/_search`, `/{index}/_msearch`, `/{index}/_search/template` and `/{index}/_msearch/template`. Since indexes no longer contain types, these typed endpoints are obsolete. + +The `/{index}/{type}/_termvectors`, `/{index}/{type}/{id}/_termvectors` and `/{index}/{type}/_mtermvectors` REST endpoints have been removed in favour of `/{index}/_termvectors`, `/{index}/{id}/_termvectors` and `/{index}/_mtermvectors`. Since indexes no longer contain types, these typed endpoints are obsolete.
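+ +For example, a search that was previously submitted to a typed endpoint such as `GET /my-index/my-type/_search` (`my-index` and `my-type` are placeholder names) should now use the untyped form: + +[source,console] +-------------------------------------------------- +GET /my-index/_search +{ + "query": { "match_all": {} } +} +--------------------------------------------------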
\ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java index 2c46b6f694add..5195ce9396313 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java @@ -19,9 +19,7 @@ package org.elasticsearch.script.mustache; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; @@ -40,10 +38,6 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestMultiSearchTemplateAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = new DeprecationLogger( - LogManager.getLogger(RestMultiSearchTemplateAction.class)); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + - " Specifying types in multi search template requests is deprecated."; private static final Set RESPONSE_PARAMS; @@ -65,10 +59,6 @@ public RestMultiSearchTemplateAction(Settings settings, RestController controlle controller.registerHandler(POST, "/_msearch/template", this); controller.registerHandler(GET, "/{index}/_msearch/template", this); controller.registerHandler(POST, "/{index}/_msearch/template", this); - - // Deprecated typed endpoints. - controller.registerHandler(GET, "/{index}/{type}/_msearch/template", this); - controller.registerHandler(POST, "/{index}/{type}/_msearch/template", this); } @Override @@ -79,14 +69,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { MultiSearchTemplateRequest multiRequest = parseRequest(request, allowExplicitIndex); - - // Emit a single deprecation message if any search template contains types. - for (SearchTemplateRequest searchTemplateRequest : multiRequest.requests()) { - if (searchTemplateRequest.getRequest().types().length > 0) { - deprecationLogger.deprecatedAndMaybeLog("msearch_with_types", TYPES_DEPRECATION_MESSAGE); - break; - } - } return channel -> client.execute(MultiSearchTemplateAction.INSTANCE, multiRequest, new RestToXContentListener<>(channel)); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index 70a12f0c8bf56..f80d6ef43d05e 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -54,10 +54,6 @@ public RestSearchTemplateAction(Settings settings, RestController controller) { controller.registerHandler(POST, "/_search/template", this); controller.registerHandler(GET, "/{index}/_search/template", this); controller.registerHandler(POST, "/{index}/_search/template", this); - - // Deprecated typed endpoints. 
- controller.registerHandler(GET, "/{index}/{type}/_search/template", this); - controller.registerHandler(POST, "/{index}/{type}/_search/template", this); } @Override diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java deleted file mode 100644 index eacb1e3c4e803..0000000000000 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.script.mustache; - -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.nio.charset.StandardCharsets; - -public class RestMultiSearchTemplateActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestMultiSearchTemplateAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - String content = "{ \"index\": \"some_index\" } \n" + - "{\"source\": {\"query\" : {\"match_all\" :{}}}} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_msearch/template") - .withContent(bytesContent, XContentType.JSON) - .build(); - - dispatchRequest(request); - assertWarnings(RestMultiSearchTemplateAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() { - String content = "{ \"index\": \"some_index\", \"type\": \"some_type\" } \n" + - "{\"source\": {\"query\" : {\"match_all\" :{}}}} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withPath("/some_index/_msearch/template") - .withContent(bytesContent, XContentType.JSON) - .build(); - - dispatchRequest(request); - assertWarnings(RestMultiSearchTemplateAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java deleted file mode 100644 index 0da8afbae0402..0000000000000 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java +++ 
/dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.script.mustache; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -public class RestSearchTemplateActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestSearchTemplateAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_search/template") - .build(); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.GET) - .withPath("/some_index/_search/template") - .withParams(params) - .build(); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java index be232ca7c402f..5d4d140131a14 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java @@ -35,7 +35,6 @@ public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler RESPONSE_PARAMS; @@ -73,10 +67,6 @@ public RestMultiSearchAction(Settings settings, RestController controller) { controller.registerHandler(GET, "/{index}/_msearch", this); controller.registerHandler(POST, "/{index}/_msearch", this); - // Deprecated typed endpoints. - controller.registerHandler(GET, "/{index}/{type}/_msearch", this); - controller.registerHandler(POST, "/{index}/{type}/_msearch", this); - this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings); } @@ -88,14 +78,6 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex); - - // Emit a single deprecation message if any search request contains types. 
- for (SearchRequest searchRequest : multiSearchRequest.requests()) { - if (searchRequest.types().length > 0) { - deprecationLogger.deprecatedAndMaybeLog("msearch_with_types", TYPES_DEPRECATION_MESSAGE); - break; - } - } return channel -> client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel)); } @@ -145,7 +127,6 @@ public static void parseMultiLineRequest(RestRequest request, IndicesOptions ind CheckedBiConsumer consumer) throws IOException { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - String[] types = Strings.splitStringByCommaToArray(request.param("type")); String searchType = request.param("search_type"); boolean ccsMinimizeRoundtrips = request.paramAsBoolean("ccs_minimize_roundtrips", true); String routing = request.param("routing"); @@ -153,7 +134,7 @@ public static void parseMultiLineRequest(RestRequest request, IndicesOptions ind final Tuple sourceTuple = request.contentOrSourceParam(); final XContent xContent = sourceTuple.v1().xContent(); final BytesReference data = sourceTuple.v2(); - MultiSearchRequest.readMultiLineFormat(data, xContent, consumer, indices, indicesOptions, types, routing, + MultiSearchRequest.readMultiLineFormat(data, xContent, consumer, indices, indicesOptions, Strings.EMPTY_ARRAY, routing, searchType, ccsMinimizeRoundtrips, request.getXContentRegistry(), allowExplicitIndex); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 00c08a124f1e4..95695bec4f0c1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -19,13 +19,11 @@ package org.elasticsearch.rest.action.search; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; @@ -69,20 +67,12 @@ public class RestSearchAction extends BaseRestHandler { RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); } - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestSearchAction.class)); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + - " Specifying types in search requests is deprecated."; - public RestSearchAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(GET, "/_search", this); controller.registerHandler(POST, "/_search", this); controller.registerHandler(GET, "/{index}/_search", this); controller.registerHandler(POST, "/{index}/_search", this); - - // Deprecated typed endpoints. 
- controller.registerHandler(GET, "/{index}/{type}/_search", this); - controller.registerHandler(POST, "/{index}/{type}/_search", this); } @Override @@ -166,10 +156,6 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } - if (request.hasParam("type")) { - deprecationLogger.deprecatedAndMaybeLog("search_with_types", TYPES_DEPRECATION_MESSAGE); - searchRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); - } searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsActionTests.java deleted file mode 100644 index dd98089246b51..0000000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestMultiTermVectorsActionTests.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.document; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestRequest.Method; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.junit.Before; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -public class RestMultiTermVectorsActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestMultiTermVectorsAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.POST) - .withPath("/some_index/some_type/_mtermvectors") - .build(); - - dispatchRequest(request); - assertWarnings(RestMultiTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.GET) - .withPath("/some_index/_mtermvectors") - .withParams(params) - .build(); - - dispatchRequest(request); - assertWarnings(RestMultiTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() throws IOException { - XContentBuilder content = XContentFactory.jsonBuilder().startObject() - .startArray("docs") - .startObject() - .field("_type", "some_type") - .field("_id", 1) - .endObject() - .endArray() - .endObject(); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.GET) - .withPath("/some_index/_mtermvectors") - .withContent(BytesReference.bytes(content), XContentType.JSON) - .build(); - - dispatchRequest(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java deleted file mode 100644 index d93f7749f63e3..0000000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.document; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestRequest.Method; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.junit.Before; - -import java.io.IOException; - -public class RestTermVectorsActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestTermVectorsAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.POST) - .withPath("/some_index/some_type/some_id/_termvectors") - .build(); - - dispatchRequest(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() throws IOException { - XContentBuilder content = XContentFactory.jsonBuilder().startObject() - .field("_type", "some_type") - .field("_id", 1) - .endObject(); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.GET) - .withPath("/some_index/_termvectors/some_id") - .withContent(BytesReference.bytes(content), XContentType.JSON) - .build(); - - dispatchRequest(request); - assertWarnings(RestTermVectorsAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java deleted file mode 100644 index 3b80d2002c5c5..0000000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/search/RestMultiSearchActionTests.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.search; - -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.junit.Before; - -import java.nio.charset.StandardCharsets; - -public class RestMultiSearchActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestMultiSearchAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - String content = "{ \"index\": \"some_index\" } \n {} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_msearch") - .withContent(bytesContent, XContentType.JSON) - .build(); - - dispatchRequest(request); - assertWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeInBody() { - String content = "{ \"index\": \"some_index\", \"type\": \"some_type\" } \n {} \n"; - BytesArray bytesContent = new BytesArray(content.getBytes(StandardCharsets.UTF_8)); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.POST) - .withPath("/some_index/_msearch") - .withContent(bytesContent, XContentType.JSON) - .build(); - - dispatchRequest(request); - assertWarnings(RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java deleted file mode 100644 index 522d04b37c663..0000000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.search; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.test.rest.RestActionTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.junit.Before; - -import java.util.HashMap; -import java.util.Map; - -public class RestSearchActionTests extends RestActionTestCase { - - @Before - public void setUpAction() { - new RestSearchAction(Settings.EMPTY, controller()); - } - - public void testTypeInPath() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.GET) - .withPath("/some_index/some_type/_search") - .build(); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } - - public void testTypeParameter() { - Map params = new HashMap<>(); - params.put("type", "some_type"); - - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(RestRequest.Method.GET) - .withPath("/some_index/_search") - .withParams(params) - .build(); - - dispatchRequest(request); - assertWarnings(RestSearchAction.TYPES_DEPRECATION_MESSAGE); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java index 7305b2f1902cf..1fe3eeaef52ee 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java @@ -158,10 +158,10 @@ public void testUserU1() throws Exception { assertUserIsDenied("u1", "all", "b"); assertUserIsDenied("u1", "all", "c"); assertAccessIsAllowed("u1", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u1", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u1", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u1", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u1", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -175,10 +175,10 @@ public void testUserU2() throws Exception { assertUserIsDenied("u2", "create_index", "b"); assertUserIsDenied("u2", "all", "c"); assertAccessIsAllowed("u2", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u2", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u2", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u2", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u2", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ 
-189,10 +189,10 @@ public void testUserU3() throws Exception { assertUserIsAllowed("u3", "all", "b"); assertUserIsDenied("u3", "all", "c"); assertAccessIsAllowed("u3", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u3", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u3", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u3", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u3", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -213,10 +213,10 @@ public void testUserU4() throws Exception { assertUserIsAllowed("u4", "manage", "an_index"); assertAccessIsAllowed("u4", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u4", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u4", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsDenied("u4", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u4", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -232,10 +232,10 @@ public void testUserU5() throws Exception { assertUserIsDenied("u5", "write", "b"); assertAccessIsAllowed("u5", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u5", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u5", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsDenied("u5", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u5", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -248,10 +248,10 @@ public void testUserU6() throws Exception { assertUserIsDenied("u6", "write", "b"); assertUserIsDenied("u6", "all", "c"); assertAccessIsAllowed("u6", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u6", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u6", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u6", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", 
"{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u6", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -262,10 +262,10 @@ public void testUserU7() throws Exception { assertUserIsDenied("u7", "all", "b"); assertUserIsDenied("u7", "all", "c"); assertAccessIsDenied("u7", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsDenied("u7", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsDenied("u7", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsDenied("u7", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsDenied("u7", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -276,10 +276,10 @@ public void testUserU8() throws Exception { assertUserIsAllowed("u8", "all", "b"); assertUserIsAllowed("u8", "all", "c"); assertAccessIsAllowed("u8", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u8", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u8", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u8", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u8", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -293,10 +293,10 @@ public void testUserU9() throws Exception { assertUserIsDenied("u9", "write", "b"); assertUserIsDenied("u9", "all", "c"); assertAccessIsAllowed("u9", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u9", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u9", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u9", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u9", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -316,10 +316,10 @@ public void testUserU11() throws Exception { assertUserIsDenied("u11", "monitor", "c"); assertAccessIsDenied("u11", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsDenied("u11", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsDenied("u11", "POST", "/" + 
randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertBodyHasAccessIsDenied("u11", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsDenied("u11", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -333,10 +333,10 @@ public void testUserU12() throws Exception { assertUserIsDenied("u12", "manage", "c"); assertUserIsAllowed("u12", "data_access", "c"); assertAccessIsAllowed("u12", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u12", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u12", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsAllowed("u12", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u12", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -355,10 +355,10 @@ public void testUserU13() throws Exception { assertUserIsDenied("u13", "all", "c"); assertAccessIsAllowed("u13", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); assertAccessIsAllowed("u13", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); - assertAccessIsAllowed("u13", "PUT", "/a/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); - assertBodyHasAccessIsDenied("u13", "PUT", "/b/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u13", "PUT", "/a/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertBodyHasAccessIsDenied("u13", "PUT", "/b/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u13", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -377,10 +377,10 @@ public void testUserU14() throws Exception { assertUserIsDenied("u14", "all", "c"); assertAccessIsAllowed("u14", - "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); - assertAccessIsAllowed("u14", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + "GET", "/" + randomIndex() + "/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u14", "POST", "/" + randomIndex() + "/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); assertAccessIsDenied("u14", "PUT", - "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + "/" + randomIndex() + "/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); assertAccessIsAllowed("u14", "GET", "/" + randomIndex() + "/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); } @@ -434,7 +434,7 @@ private void assertUserExecutes(String user, String action, String index, boolea assertAccessIsAllowed(user, "POST", "/" + 
index + "/_open"); assertAccessIsAllowed(user, "POST", "/" + index + "/_cache/clear"); // indexing a document to have the mapping available, and wait for green state to make sure index is created - assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/1", jsonDoc); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/_doc/1", jsonDoc); assertNoTimeout(client().admin().cluster().prepareHealth(index).setWaitForGreenStatus().get()); assertAccessIsAllowed(user, "GET", "/" + index + "/_mapping/field/name"); assertAccessIsAllowed(user, "GET", "/" + index + "/_settings"); @@ -490,7 +490,7 @@ private void assertUserExecutes(String user, String action, String index, boolea assertAccessIsAllowed("admin", "GET", "/" + index + "/_refresh"); assertAccessIsAllowed(user, "GET", "/" + index + "/_count"); assertAccessIsAllowed("admin", "GET", "/" + index + "/_search"); - assertAccessIsAllowed("admin", "GET", "/" + index + "/foo/1"); + assertAccessIsAllowed("admin", "GET", "/" + index + "/_doc/1"); assertAccessIsAllowed(user, "GET", "/" + index + "/_explain/1", "{ \"query\" : { \"match_all\" : {} } }"); assertAccessIsAllowed(user, "GET", "/" + index + "/_termvectors/1"); assertUserIsAllowed(user, "search", index); @@ -513,30 +513,30 @@ private void assertUserExecutes(String user, String action, String index, boolea case "get" : if (userIsAllowed) { - assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1"); + assertAccessIsAllowed(user, "GET", "/" + index + "/_doc/1"); } else { - assertAccessIsDenied(user, "GET", "/" + index + "/foo/1"); + assertAccessIsDenied(user, "GET", "/" + index + "/_doc/1"); } break; case "index" : if (userIsAllowed) { - assertAccessIsAllowed(user, "PUT", "/" + index + "/foo/321", "{ \"foo\" : \"bar\" }"); - assertAccessIsAllowed(user, "POST", "/" + index + "/foo/321/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); + assertAccessIsAllowed(user, "PUT", "/" + index + "/_doc/321", "{ \"foo\" : \"bar\" }"); + assertAccessIsAllowed(user, "POST", "/" + index + "/_doc/321/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); } else { - assertAccessIsDenied(user, "PUT", "/" + index + "/foo/321", "{ \"foo\" : \"bar\" }"); - assertAccessIsDenied(user, "POST", "/" + index + "/foo/321/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); + assertAccessIsDenied(user, "PUT", "/" + index + "/_doc/321", "{ \"foo\" : \"bar\" }"); + assertAccessIsDenied(user, "POST", "/" + index + "/_doc/321/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); } break; case "delete" : String jsonDoc = "{ \"name\" : \"docToDelete\"}"; - assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete", jsonDoc); - assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete2", jsonDoc); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/_doc/docToDelete", jsonDoc); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/_doc/docToDelete2", jsonDoc); if (userIsAllowed) { - assertAccessIsAllowed(user, "DELETE", "/" + index + "/foo/docToDelete"); + assertAccessIsAllowed(user, "DELETE", "/" + index + "/_doc/docToDelete"); } else { - assertAccessIsDenied(user, "DELETE", "/" + index + "/foo/docToDelete"); + assertAccessIsDenied(user, "DELETE", "/" + index + "/_doc/docToDelete"); } break; From 731dac765e28b6a492c0d93270f131a761cce12d Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 9 May 2019 09:51:12 -0400 Subject: [PATCH 045/321] [DOCS] Replace table with def list for ids query (#41865) --- docs/reference/query-dsl/ids-query.asciidoc | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff 
--git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index 70554e1acbf1b..43de8cb7332d3 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -21,8 +21,5 @@ GET /_search [[ids-query-top-level-parameters]] ==== Top-level parameters for `ids` -[cols="v,v",options="header"] -|====== -|Parameter |Description -|`values` |An array of <>. -|====== \ No newline at end of file +`values`:: +An array of <>. \ No newline at end of file From d2f93aea4c044da12e1571db49a874bc0eec256a Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 9 May 2019 08:02:43 -0600 Subject: [PATCH 046/321] Default seed address tests account for no IPv6 (#41971) This change makes the default seed address tests account for the lack of an IPv6 network. By default docker containers only run with IPv4 and these tests fail in a vanilla installation of elasticsearch-ci. To resolve this we only expect IPv6 seed addresses if IPv6 is available. Relates #41404 --- .../transport/TcpTransportTests.java | 55 +++++++++++-------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 80d183e499e25..17106508ae71a 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockPageCacheRecycler; @@ -97,54 +98,64 @@ public void testRejectsPortRanges() { } public void testDefaultSeedAddressesWithDefaultPort() { - testDefaultSeedAddresses(Settings.EMPTY, containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", "[::1]:9303", "[::1]:9304", "[::1]:9305", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305")); + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder( + "[::1]:9300", "[::1]:9301", "[::1]:9302", "[::1]:9303", "[::1]:9304", "[::1]:9305", + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305") : + containsInAnyOrder( + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305"); + testDefaultSeedAddresses(Settings.EMPTY, seedAddressMatcher); } public void testDefaultSeedAddressesWithNonstandardGlobalPortRange() { - testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500-9600").build(), containsInAnyOrder( - "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", - "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505")); + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? 
+ containsInAnyOrder( + "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505") : + containsInAnyOrder( + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505"); + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500-9600").build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithSmallGlobalPortRange() { - testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9300-9302").build(), containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder("[::1]:9300", "[::1]:9301", "[::1]:9302", "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302") : + containsInAnyOrder("127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302"); + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9300-9302").build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithNonstandardProfilePortRange() { + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder("[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505") : + containsInAnyOrder("127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505"); testDefaultSeedAddresses(Settings.builder() .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9500-9600") - .build(), - containsInAnyOrder( - "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", - "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505")); + .build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithSmallProfilePortRange() { + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? + containsInAnyOrder("[::1]:9300", "[::1]:9301", "[::1]:9302", "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302") : + containsInAnyOrder("127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302"); testDefaultSeedAddresses(Settings.builder() .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9300-9302") - .build(), - containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + .build(), seedAddressMatcher); } public void testDefaultSeedAddressesPrefersProfileSettingToGlobalSetting() { + final Matcher> seedAddressMatcher = NetworkUtils.SUPPORTS_V6 ? 
+ containsInAnyOrder("[::1]:9300", "[::1]:9301", "[::1]:9302", "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302") : + containsInAnyOrder("127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302"); testDefaultSeedAddresses(Settings.builder() .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9300-9302") .put(TransportSettings.PORT.getKey(), "9500-9600") - .build(), - containsInAnyOrder( - "[::1]:9300", "[::1]:9301", "[::1]:9302", - "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + .build(), seedAddressMatcher); } public void testDefaultSeedAddressesWithNonstandardSinglePort() { testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500").build(), - containsInAnyOrder("[::1]:9500", "127.0.0.1:9500")); + NetworkUtils.SUPPORTS_V6 ? containsInAnyOrder("[::1]:9500", "127.0.0.1:9500") : containsInAnyOrder("127.0.0.1:9500")); } private void testDefaultSeedAddresses(final Settings settings, Matcher> seedAddressesMatcher) { From 8a068935c6adb38c1c106ac1da2274d1d3310107 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 9 May 2019 10:10:27 -0400 Subject: [PATCH 047/321] Limit max direct memory size to half of heap size (#42006) This commit adds an ergonomic choice ot the max direct memory size such that if it is not set, we default it to half of the heap size. --- .../tools/launchers/JvmErgonomics.java | 8 +++ .../tools/launchers/JvmErgonomicsTests.java | 57 ++++++++++++++++--- 2 files changed, 56 insertions(+), 9 deletions(-) diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java index 12757c970496a..d0d5bef9cfcf4 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmErgonomics.java @@ -63,6 +63,10 @@ static List choose(final List userDefinedJvmOptions) throws Inte ergonomicChoices.add("-Dio.netty.allocator.type=pooled"); } } + final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions); + if (maxDirectMemorySize == 0) { + ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2); + } return ergonomicChoices; } @@ -120,6 +124,10 @@ static Long extractHeapSize(final Map> finalJvmOptions) return Long.parseLong(finalJvmOptions.get("MaxHeapSize").get()); } + static long extractMaxDirectMemorySize(final Map> finalJvmOptions) { + return Long.parseLong(finalJvmOptions.get("MaxDirectMemorySize").get()); + } + private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?[\\w+].*?)=(?.*)$"); // package private for testing diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java index b5b6699f4716f..7fe5cd0cf98b0 100644 --- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java +++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmErgonomicsTests.java @@ -23,13 +23,16 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static 
org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -85,6 +88,19 @@ public void testHeapSizeWithSpace() throws InterruptedException, IOException { } } + public void testMaxDirectMemorySizeUnset() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractMaxDirectMemorySize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx1g"))), + equalTo(0L)); + } + + public void testMaxDirectMemorySizeSet() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.extractMaxDirectMemorySize(JvmErgonomics.finalJvmOptions( + Arrays.asList("-Xmx1g", "-XX:MaxDirectMemorySize=512m"))), + equalTo(512L << 20)); + } + public void testExtractSystemProperties() { Map expectedSystemProperties = new HashMap<>(); expectedSystemProperties.put("file.encoding", "UTF-8"); @@ -101,16 +117,39 @@ public void testExtractNoSystemProperties() { assertTrue(parsedSystemProperties.isEmpty()); } - public void testLittleMemoryErgonomicChoices() throws InterruptedException, IOException { - String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); - List expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=unpooled"); - assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap))); + public void testPooledMemoryChoiceOnSmallHeap() throws InterruptedException, IOException { + final String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G")); + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)), + hasItem("-Dio.netty.allocator.type=unpooled")); + } + + public void testPooledMemoryChoiceOnNotSmallHeap() throws InterruptedException, IOException { + final String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)), + hasItem("-Dio.netty.allocator.type=pooled")); } - public void testPlentyMemoryErgonomicChoices() throws InterruptedException, IOException { - String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G")); - List expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=pooled"); - assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap))); + public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException { + final Map heapMaxDirectMemorySize = Map.of( + "64M", Long.toString((64L << 20) / 2), + "512M", Long.toString((512L << 20) / 2), + "1024M", Long.toString((1024L << 20) / 2), + "1G", Long.toString((1L << 30) / 2), + "2048M", Long.toString((2048L << 20) / 2), + "2G", Long.toString((2L << 30) / 2), + "8G", Long.toString((8L << 30) / 2)); + final String heapSize = randomFrom(heapMaxDirectMemorySize.keySet().toArray(String[]::new)); + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms" + heapSize, "-Xmx" + heapSize)), + hasItem("-XX:MaxDirectMemorySize=" + heapMaxDirectMemorySize.get(heapSize))); + } + + public void testMaxDirectMemorySizeChoiceWhenSet() throws InterruptedException, IOException { + assertThat( + JvmErgonomics.choose(Arrays.asList("-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=1g")), + 
everyItem(not(startsWith("-XX:MaxDirectMemorySize=")))); } } From 61c28bc5431b019c7fe62e3b2c4d3414c2bec05f Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 9 May 2019 07:48:23 -0700 Subject: [PATCH 048/321] [DOCS] Updates security configuration overview (#41982) --- .../docs/en/security/configuring-es.asciidoc | 55 ++++++++++--------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index b34e6e0c0e9f2..fdc49ef21e213 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -11,38 +11,31 @@ such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see {stack-ov}/elasticsearch-security.html[Securing the {stack}]. -To use {es} {security-features}: - -. Verify that you are using a license that includes the {security-features}. +. Verify that you are using a license that includes the specific +{security-features} you want. + -- -If you want to try all of the platinum features, you can start a 30-day trial. -At the end of the trial period, you can purchase a subscription to keep using -the full functionality. For more information, see -https://www.elastic.co/subscriptions and -{stack-ov}/license-management.html[License Management]. +For more information, see https://www.elastic.co/subscriptions and +{stack-ov}/license-management.html[License management]. -- . Verify that the `xpack.security.enabled` setting is `true` on each node in -your cluster. If you are using a trial license, the default value is `false`. -For more information, see {ref}/security-settings.html[Security Settings in {es}]. +your cluster. If you are using basic or trial licenses, the default value is `false`. +For more information, see {ref}/security-settings.html[Security settings in {es}]. . If you plan to run {es} in a Federal Information Processing Standard (FIPS) 140-2 enabled JVM, see <>. -. Configure Transport Layer Security (TLS/SSL) for internode-communication. +. <>. + -- NOTE: This requirement applies to clusters with more than one node and to clusters with a single node that listens on an external interface. Single-node clusters that use a loopback interface do not have this requirement. For more information, see -{stack-ov}/encrypting-communications.html[Encrypting Communications]. +{stack-ov}/encrypting-communications.html[Encrypting communications]. -- -.. <>. - -.. <>. . If it is not already running, start {es}. @@ -72,14 +65,20 @@ user API. -- -. Choose which types of realms you want to use to authenticate users. -** <>. -** <>. -** <>. -** <>. -** <>. -** <>. -** <>. +. Choose which types of realms you want to use to authenticate users. ++ +-- +TIP: The types of authentication realms that you can enable varies according to +your subscription. For more information, see https://www.elastic.co/subscriptions. + +-- +** <> +** <> +** <> +** <> +** <> +** <> +** <> . Set up roles and users to control access to {es}. + @@ -114,10 +113,13 @@ curl -XPOST -u elastic 'localhost:9200/_security/user/johndoe' -H "Content-Type: // NOTCONSOLE -- -. [[enable-auditing]]Enable auditing to keep track of attempted and successful interactions with - your {es} cluster: +. [[enable-auditing]](Optional) Enable auditing to keep track of attempted and +successful interactions with your {es} cluster: + -- +TIP: Audit logging is available with specific subscriptions. 
For more +information, see https://www.elastic.co/subscriptions. + .. Add the following setting to `elasticsearch.yml` on all nodes in your cluster: + [source,yaml] @@ -134,6 +136,9 @@ Events are logged to a dedicated `_audit.json` file in `ES_HOME/logs`, on each cluster node. -- +To walk through the configuration of {security-features} in {es}, {kib}, {ls}, and {metricbeat}, see +{stack-ov}/security-getting-started.html[Getting started with security]. + :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc include::{es-repo-dir}/security/securing-communications/securing-elasticsearch.asciidoc[] From 2e26a5d4a93042fd0db04d40f4756a5fe9dece4c Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 9 May 2019 10:54:21 -0400 Subject: [PATCH 049/321] Throw exception if legacy interval cannot be parsed (#41972) Due to the fallthrough logic, DateIntervalWrapper assumed that an otherwise unparsable interval was a legacy fixed millis interval. This could then NPE if the interval was just illegal ("foobar"). This commit correctly checks if the legacy millis parsing fails too, and throws an IllegalArgumentException at that point signaling the provided interval is bad. --- .../bucket/histogram/DateIntervalWrapper.java | 12 ++++++++---- .../histogram/DateHistogramAggregatorTests.java | 10 ++++++++++ .../rest-api-spec/test/rollup/rollup_search.yml | 8 ++------ 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index b86989fce168d..b08782f1fd37a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -288,11 +288,15 @@ public Rounding createRounding(ZoneId timeZone) { } else { // We're not sure what the interval was originally (legacy) so use old behavior of assuming // calendar first, then fixed. 
Required because fixed/cal overlap in places ("1h") - DateTimeUnit intervalAsUnit = tryIntervalAsCalendarUnit(); - if (intervalAsUnit != null) { - tzRoundingBuilder = Rounding.builder(tryIntervalAsCalendarUnit()); + DateTimeUnit calInterval = tryIntervalAsCalendarUnit(); + TimeValue fixedInterval = tryIntervalAsFixedUnit(); + if (calInterval != null) { + tzRoundingBuilder = Rounding.builder(calInterval); + } else if (fixedInterval != null) { + tzRoundingBuilder = Rounding.builder(fixedInterval); } else { - tzRoundingBuilder = Rounding.builder(tryIntervalAsFixedUnit()); + // If we get here we have exhausted our options and are not able to parse this interval + throw new IllegalArgumentException("Unable to parse interval [" + dateHistogramInterval + "]"); } } if (timeZone != null) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index f671b21eb5e9b..17581b9458413 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -1097,6 +1097,16 @@ public void testLegacyThenNew() throws IOException { assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } + public void testIllegalInterval() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Collections.emptyList(), + aggregation -> aggregation.dateHistogramInterval(new DateHistogramInterval("foobar")).field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Unable to parse interval [foobar]")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + private void testSearchCase(Query query, List dataset, Consumer configure, Consumer verify) throws IOException { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index cc5b778223379..ca04327eab729 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -1246,9 +1246,7 @@ setup: --- "Search error against live index": - - skip: - version: "all" - reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/41970" + - do: catch: bad_request rollup.rollup_search: @@ -1264,9 +1262,7 @@ setup: --- "Search error against rollup and live index": - - skip: - version: "all" - reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/41970" + - do: catch: bad_request rollup.rollup_search: From 70eb812f83b9244312a1098b21d88a4bd513b1de Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 9 May 2019 17:12:34 +0200 Subject: [PATCH 050/321] Remove Delete Method from BlobStore (#41619) * Remove Delete Method from BlobStore * The delete method on the blob store was used almost nowhere and just duplicates the delete method on the blob containers * The recursive delete logic it provided (which did not behave the same way on all implementations) was not used and not properly tested either ---
.../common/blobstore/url/URLBlobStore.java | 17 ------- .../repositories/azure/AzureBlobStore.java | 15 ------ .../gcs/GoogleCloudStorageBlobStore.java | 16 +----- .../repositories/hdfs/HdfsBlobStore.java | 8 --- .../repositories/s3/S3BlobStore.java | 49 ------------------- .../common/blobstore/BlobStore.java | 7 --- .../common/blobstore/fs/FsBlobStore.java | 11 ----- .../blobstore/BlobStoreRepository.java | 5 +- .../snapshots/mockstore/BlobStoreWrapper.java | 5 -- .../repositories/ESBlobStoreTestCase.java | 3 -- 10 files changed, 5 insertions(+), 131 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index a7042b8bfee2b..8f5ce9b0ffe4f 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -57,9 +57,6 @@ public URLBlobStore(Settings settings, URL path) { new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); } - /** - * {@inheritDoc} - */ @Override public String toString() { return path.toString(); @@ -83,9 +80,6 @@ public int bufferSizeInBytes() { return this.bufferSizeInBytes; } - /** - * {@inheritDoc} - */ @Override public BlobContainer blobContainer(BlobPath path) { try { @@ -95,17 +89,6 @@ public BlobContainer blobContainer(BlobPath path) { } } - /** - * This operation is not supported by URL Blob Store - */ - @Override - public void delete(BlobPath path) { - throw new UnsupportedOperationException("URL repository is read only"); - } - - /** - * {@inheritDoc} - */ @Override public void close() { // nothing to do here... diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 654836ea0fbef..697125fbd537d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -22,8 +22,6 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -40,8 +38,6 @@ import static java.util.Collections.emptyMap; public class AzureBlobStore implements BlobStore { - - private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); private final AzureStorageService service; @@ -82,17 +78,6 @@ public BlobContainer blobContainer(BlobPath path) { return new AzureBlobContainer(path, this); } - @Override - public void delete(BlobPath path) throws IOException { - final String keyPath = path.buildAsString(); - try { - service.deleteFiles(clientName, container, keyPath); - } catch (URISyntaxException | StorageException e) { - logger.warn("cannot access [{}] in container {{}}: {}", keyPath, container, e.getMessage()); - throw new IOException(e); - } - } - @Override public void close() { } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java 
b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index dab7c9627e6dc..4214e5d408210 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -50,11 +50,11 @@ import java.nio.channels.WritableByteChannel; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; +import java.util.Map; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -91,11 +91,6 @@ public BlobContainer blobContainer(BlobPath path) { return new GoogleCloudStorageBlobContainer(path, this); } - @Override - public void delete(BlobPath path) throws IOException { - deleteBlobsByPrefix(path.buildAsString()); - } - @Override public void close() { } @@ -291,15 +286,6 @@ void deleteBlob(String blobName) throws IOException { } } - /** - * Deletes multiple blobs from the specific bucket all of which have prefixed names - * - * @param prefix prefix of the blobs to delete - */ - private void deleteBlobsByPrefix(String prefix) throws IOException { - deleteBlobsIgnoringIfNotExists(listBlobsByPrefix("", prefix).keySet()); - } - /** * Deletes multiple blobs from the specific bucket using a batch request * diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java index fde7657fe31d0..ad0e663058554 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -66,14 +66,6 @@ private void mkdirs(Path path) throws IOException { }); } - @Override - public void delete(BlobPath path) throws IOException { - execute((Operation) fc -> { - fc.delete(translateToHdfsPath(path), true); - return null; - }); - } - @Override public String toString() { return root.toUri().toString(); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index d4df4094fcf92..fcded00553580 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -20,10 +20,6 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.StorageClass; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; @@ -33,7 +29,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; -import java.util.ArrayList; import java.util.Locale; class S3BlobStore implements BlobStore { @@ -90,50 +85,6 @@ public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); } - @Override - 
public void delete(BlobPath path) { - try (AmazonS3Reference clientReference = clientReference()) { - ObjectListing prevListing = null; - // From - // http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html - // we can do at most 1K objects per delete - // We don't know the bucket name until first object listing - DeleteObjectsRequest multiObjectDeleteRequest = null; - final ArrayList keys = new ArrayList<>(); - while (true) { - ObjectListing list; - if (prevListing != null) { - final ObjectListing finalPrevListing = prevListing; - list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); - } else { - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(bucket, path.buildAsString())); - multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); - } - for (final S3ObjectSummary summary : list.getObjectSummaries()) { - keys.add(new KeyVersion(summary.getKey())); - // Every 500 objects batch the delete request - if (keys.size() > 500) { - multiObjectDeleteRequest.setKeys(keys); - final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; - SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); - multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); - keys.clear(); - } - } - if (list.isTruncated()) { - prevListing = list; - } else { - break; - } - } - if (!keys.isEmpty()) { - multiObjectDeleteRequest.setKeys(keys); - final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; - SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); - } - } - } - @Override public void close() throws IOException { this.service.close(); diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java index e4cdb148a158e..6ed6722995cca 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.blobstore; import java.io.Closeable; -import java.io.IOException; /** * An interface for storing blobs. @@ -30,10 +29,4 @@ public interface BlobStore extends Closeable { * Get a blob container instance for storing blobs at the given {@link BlobPath}. */ BlobContainer blobContainer(BlobPath path); - - /** - * Delete the blob store at the given {@link BlobPath}. 
- */ - void delete(BlobPath path) throws IOException; - } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index 8a4d51e4dc93c..60c39a48e09c1 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.blobstore.fs; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; @@ -72,16 +71,6 @@ public BlobContainer blobContainer(BlobPath path) { } } - @Override - public void delete(BlobPath path) throws IOException { - assert readOnly == false : "should not delete anything from a readonly repository: " + path; - //noinspection ConstantConditions in case assertions are disabled - if (readOnly) { - throw new ElasticsearchException("unexpectedly deleting [" + path + "] from a readonly repository"); - } - IOUtils.rm(buildPath(path)); - } - @Override public void close() { // nothing to do here... diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 0ef1d3ab149f0..5ed73a0058cc5 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -631,7 +631,10 @@ public String startVerification() { public void endVerification(String seed) { if (isReadOnly() == false) { try { - blobStore().delete(basePath().add(testBlobPrefix(seed))); + final String testPrefix = testBlobPrefix(seed); + final BlobContainer container = blobStore().blobContainer(basePath().add(testPrefix)); + container.deleteBlobsIgnoringIfNotExists(List.copyOf(container.listBlobs().keySet())); + blobStore().blobContainer(basePath()).deleteBlobIgnoringIfNotExists(testPrefix); } catch (IOException exp) { throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java index 08e0c6fdcfaed..cdb2ef3ce2dde 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java @@ -37,11 +37,6 @@ public BlobContainer blobContainer(BlobPath path) { return delegate.blobContainer(path); } - @Override - public void delete(BlobPath path) throws IOException { - delegate.delete(path); - } - @Override public void close() throws IOException { delegate.close(); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java index ccc38ae362991..a32d841927360 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java @@ -49,9 +49,6 @@ public void testContainerCreationAndDeletion() throws IOException { assertTrue(containerFoo.blobExists("test")); assertTrue(containerBar.blobExists("test")); - 
store.delete(new BlobPath()); - assertFalse(containerFoo.blobExists("test")); - assertFalse(containerBar.blobExists("test")); } } From 335ed64b24f9925c9e2106dc16d3f87f0a344395 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 9 May 2019 17:33:49 +0300 Subject: [PATCH 051/321] mute failing test Tracked in #41256 --- .../gradle/testclusters/TestClustersPluginIT.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 06af46c4e7ed9..84b13340c35cf 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Ignore; import java.util.Arrays; @@ -80,7 +81,8 @@ public void testUseClusterBySkippedAndWorkingTask() { "Stopping `node{::myTestCluster-1}`" ); } - + + @Ignore // https://github.com/elastic/elasticsearch/issues/41256 public void testMultiProject() { BuildResult result = getTestClustersRunner( "user1", "user2", "-s", "-i", "--parallel", "-Dlocal.repo.path=" + getLocalTestRepoPath() @@ -158,6 +160,7 @@ public void testConfigurationLocked() { ); } + @Ignore // https://github.com/elastic/elasticsearch/issues/41256 public void testMultiNode() { BuildResult result = getTestClustersRunner(":multiNode").build(); assertTaskSuccessful(result, ":multiNode"); From 3911770aa82112c6362e030b270eaa057da97241 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 9 May 2019 09:41:23 -0600 Subject: [PATCH 052/321] Fix node close stopwatch usage (#41918) The close method in Node uses a StopWatch to time the closing of various services. However, the call to log the timing was made before any of the services had been closed and therefore no timing would be printed out. This change moves the timing log call to be a closeable that is the last item closed. --- server/src/main/java/org/elasticsearch/node/Node.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 4e8b81aea2e7b..bd7dad26b0c0e 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -841,13 +841,15 @@ public synchronized void close() throws IOException { // Don't call shutdownNow here, it might break ongoing operations on Lucene indices. // See https://issues.apache.org/jira/browse/LUCENE-7248. We call shutdownNow in // awaitClose if the node doesn't finish closing within the specified time. 
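// Editor's annotation (not part of the patch): the hunk below is the whole fix. Previously the
// trace call ran before any Closeable in toClose had executed, so the StopWatch had recorded
// nothing. The fix interleaves stopWatch.stop().start("...") markers into toClose and queues the
// trace call last, so each timer brackets the service that closes after it. A minimal sketch of
// the resulting pattern, assuming the surrounding Node fields (stopWatch, logger) and a
// hypothetical service handle:
//
//     List<Closeable> toClose = new ArrayList<>();
//     toClose.add(() -> stopWatch.stop().start("node_environment")); // start timing the next close
//     toClose.add(nodeEnvironment);                                  // hypothetical Closeable service
//     toClose.add(stopWatch::stop);                                  // stop the last timer
//     toClose.add(() -> logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint()));
//     IOUtils.close(toClose);                                        // closes in order; the log runs last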
- toClose.add(() -> stopWatch.stop()); + toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); + toClose.add(() -> stopWatch.stop().start("page_cache_recycler")); toClose.add(injector.getInstance(PageCacheRecycler.class)); + toClose.add(stopWatch::stop); if (logger.isTraceEnabled()) { - logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint()); + toClose.add(() -> logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint())); } IOUtils.close(toClose); logger.info("closed"); From 0531987a0466fd7e4af367c76cf6b8911b4df51c Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 9 May 2019 10:51:53 -0500 Subject: [PATCH 053/321] [ML] verify that there are no duplicate leaf fields in aggs (#41895) * [ML] verify that there are no duplicate leaf fields in aggs * addressing pr comments * addressing PR comments * optimizing duplication check --- .../action/PutDataFrameTransformAction.java | 8 +- .../transforms/pivot/PivotConfig.java | 62 ++++++++++++++++ .../transforms/pivot/PivotConfigTests.java | 73 ++++++++++++++++++- .../test/data_frame/transforms_crud.yml | 32 ++++++++ 4 files changed, 172 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 0f6cc63f98851..059bad3494c07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -20,6 +20,8 @@ import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class PutDataFrameTransformAction extends Action { public static final PutDataFrameTransformAction INSTANCE = new PutDataFrameTransformAction(); @@ -53,7 +55,11 @@ public static Request fromXContent(final XContentParser parser, final String id) @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = null; + for(String failure : config.getPivotConfig().aggFieldValidation()) { + validationException = addValidationError(failure, validationException); + } + return validationException; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java index c1c894e2971ae..79a0a7fc1bfa8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java @@ -13,11 +13,17 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import 
java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map.Entry; import java.util.Objects; @@ -141,7 +147,63 @@ public boolean isValid() { return groups.isValid() && aggregationConfig.isValid(); } + public List aggFieldValidation() { + if ((aggregationConfig.isValid() && groups.isValid()) == false) { + return Collections.emptyList(); + } + List usedNames = new ArrayList<>(); + // TODO this will need to change once we allow multi-bucket aggs + field merging + aggregationConfig.getAggregatorFactories().forEach(agg -> addAggNames(agg, usedNames)); + aggregationConfig.getPipelineAggregatorFactories().forEach(agg -> addAggNames(agg, usedNames)); + usedNames.addAll(groups.getGroups().keySet()); + return aggFieldValidation(usedNames); + } + public static PivotConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); } + + /** + * Does the following checks: + * + * - determines if there are any full duplicate names between the aggregation names and the group by names. + * - finds if there are conflicting name paths that could cause a failure later when the config is started. + * + * Examples showing conflicting field name paths: + * + * aggName1: foo.bar.baz + * aggName2: foo.bar + * + * This should fail as aggName1 will cause foo.bar to be an object, causing a conflict with the use of foo.bar in aggName2. + * @param usedNames The aggregation and group_by names + * @return List of validation failure messages + */ + static List aggFieldValidation(List usedNames) { + if (usedNames == null || usedNames.isEmpty()) { + return Collections.emptyList(); + } + List validationFailures = new ArrayList<>(); + + usedNames.sort(String::compareTo); + for (int i = 0; i < usedNames.size() - 1; i++) { + if (usedNames.get(i+1).startsWith(usedNames.get(i) + ".")) { + validationFailures.add("field [" + usedNames.get(i) + "] cannot be both an object and a field"); + } + if (usedNames.get(i+1).equals(usedNames.get(i))) { + validationFailures.add("duplicate field [" + usedNames.get(i) + "] detected"); + } + } + return validationFailures; + } + + + private static void addAggNames(AggregationBuilder aggregationBuilder, Collection names) { + names.add(aggregationBuilder.getName()); + aggregationBuilder.getSubAggregations().forEach(agg -> addAggNames(agg, names)); + aggregationBuilder.getPipelineAggregations().forEach(agg -> addAggNames(agg, names)); + } + + private static void addAggNames(PipelineAggregationBuilder pipelineAggregationBuilder, Collection names) { + names.add(pipelineAggregationBuilder.getName()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java index 1586ea540f4b4..342e007f21284 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.DeprecationHandler; import 
org.elasticsearch.common.xcontent.XContentParser; @@ -13,6 +14,12 @@ import org.elasticsearch.xpack.core.dataframe.transforms.AbstractSerializingDataFrameTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; public class PivotConfigTests extends AbstractSerializingDataFrameTestCase { @@ -103,7 +110,7 @@ public void testEmptyGroupBy() throws IOException { assertFalse(pivotConfig.isValid()); } - public void testMissingGroupBy() throws IOException { + public void testMissingGroupBy() { String pivot = "{" + " \"aggs\": {" + " \"avg\": {" @@ -114,7 +121,7 @@ public void testMissingGroupBy() throws IOException { expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false)); } - public void testDoubleAggs() throws IOException { + public void testDoubleAggs() { String pivot = "{" + " \"group_by\": {" + " \"id\": {" @@ -136,6 +143,68 @@ public void testDoubleAggs() throws IOException { expectThrows(IllegalArgumentException.class, () -> createPivotConfigFromString(pivot, false)); } + public void testValidAggNames() throws IOException { + String pivotAggs = "{" + + " \"group_by\": {" + + " \"user.id.field\": {" + + " \"terms\": {" + + " \"field\": \"id\"" + + "} } }," + + " \"aggs\": {" + + " \"avg.field.value\": {" + + " \"avg\": {" + + " \"field\": \"points\"" + + "} } } }"; + PivotConfig pivotConfig = createPivotConfigFromString(pivotAggs, true); + assertTrue(pivotConfig.isValid()); + List fieldValidation = pivotConfig.aggFieldValidation(); + assertTrue(fieldValidation.isEmpty()); + } + + public void testAggNameValidationsWithoutIssues() { + String prefix = randomAlphaOfLength(10) + "1"; + String prefix2 = randomAlphaOfLength(10) + "2"; + String nestedField1 = randomAlphaOfLength(10) + "3"; + String nestedField2 = randomAlphaOfLength(10) + "4"; + + assertThat(PivotConfig.aggFieldValidation(Arrays.asList(prefix + nestedField1 + nestedField2, + prefix + nestedField1, + prefix, + prefix2)), is(empty())); + + assertThat(PivotConfig.aggFieldValidation( + Arrays.asList( + dotJoin(prefix, nestedField1, nestedField2), + dotJoin(nestedField1, nestedField2), + nestedField2, + prefix2)), is(empty())); + } + + public void testAggNameValidationsWithDuplicatesAndNestingIssues() { + String prefix = randomAlphaOfLength(10) + "1"; + String prefix2 = randomAlphaOfLength(10) + "2"; + String nestedField1 = randomAlphaOfLength(10) + "3"; + String nestedField2 = randomAlphaOfLength(10) + "4"; + + List failures = PivotConfig.aggFieldValidation( + Arrays.asList( + dotJoin(prefix, nestedField1, nestedField2), + dotJoin(prefix, nestedField2), + dotJoin(prefix, nestedField1), + dotJoin(prefix2, nestedField1), + dotJoin(prefix2, nestedField1), + prefix2)); + + assertThat(failures, + containsInAnyOrder("duplicate field [" + dotJoin(prefix2, nestedField1) + "] detected", + "field [" + prefix2 + "] cannot be both an object and a field", + "field [" + dotJoin(prefix, nestedField1) + "] cannot be both an object and a field")); + } + + private static String dotJoin(String... 
fields) { + return Strings.arrayToDelimitedString(fields, "."); + } + private PivotConfig createPivotConfigFromString(String json, boolean lenient) throws IOException { final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index fa608cefd1eb3..40af091a91bd9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -302,3 +302,35 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } +--- +"Test creation failures due to duplicate and conflicting field names": + - do: + catch: /duplicate field \[airline\] detected/ + data_frame.put_data_frame_transform: + transform_id: "duplicate-field-transform" + body: > + { + "source": { + "index": "source-index" + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"airline": {"avg": {"field": "responsetime"}}} + } + } + - do: + catch: /field \[airline\] cannot be both an object and a field/ + data_frame.put_data_frame_transform: + transform_id: "duplicate-field-transform" + body: > + { + "source": { + "index": "source-index" + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"airline.responsetime": {"avg": {"field": "responsetime"}}} + } + } From 8270c801614418ac2647ecabc6cb0d2b4d376ef2 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 9 May 2019 12:31:47 -0700 Subject: [PATCH 054/321] Refactor TransportSingleShardAction to serialize Writeable responses (#41985) Previously, TransportSingleShardAction required constructing a new empty response object. This response object's Streamable readFrom was used. As part of the migration to Writeable, the interface here was updated to leverage Writeable.Reader. relates to #34389. 
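In sketch form, the migration replaces the blank-instance-plus-readFrom pattern with a
StreamInput constructor exposed through a Writeable.Reader. The response type below is
illustrative, not one from the codebase; the ActionResponse and Writeable.Reader shapes
match the ones used in the diffs:

    import java.io.IOException;
    import org.elasticsearch.action.ActionResponse;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    // Before (Streamable): the transport action built an empty response and mutated it:
    //     protected ExampleResponse newResponse() { return new ExampleResponse(); }
    //     response.readFrom(in);

    // After (Writeable): deserialization lives in a constructor.
    public class ExampleResponse extends ActionResponse {
        private final String value;

        ExampleResponse(StreamInput in) throws IOException {
            super(in);
            value = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(value);
        }

        @Override
        public void readFrom(StreamInput in) {
            // The Streamable path is deliberately disabled during the migration.
            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
        }
    }

    // The TransportSingleShardAction subclass now supplies a reader instead of newResponse():
    //     @Override
    //     protected Writeable.Reader<ExampleResponse> getResponseReader() {
    //         return ExampleResponse::new;
    //     }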
--- .../action/PainlessExecuteAction.java | 16 +++--- .../action/PainlessExecuteResponseTests.java | 50 +++++++++++++++++-- .../indices/analyze/AnalyzeResponse.java | 5 +- .../analyze/TransportAnalyzeAction.java | 5 -- .../mapping/get/GetFieldMappingsAction.java | 8 ++- .../mapping/get/GetFieldMappingsResponse.java | 44 +++++++++------- .../TransportGetFieldMappingsIndexAction.java | 5 +- .../action/explain/ExplainAction.java | 8 ++- .../action/explain/ExplainResponse.java | 27 ++++++---- .../explain/TransportExplainAction.java | 5 +- ...TransportFieldCapabilitiesIndexAction.java | 5 +- .../elasticsearch/action/get/GetAction.java | 8 ++- .../elasticsearch/action/get/GetResponse.java | 7 +-- .../action/get/MultiGetItemResponse.java | 5 +- .../action/get/MultiGetShardResponse.java | 46 +++++++++-------- .../action/get/TransportGetAction.java | 5 +- .../get/TransportShardMultiGetAction.java | 5 +- .../shard/TransportSingleShardAction.java | 11 +--- .../MultiTermVectorsItemResponse.java | 5 +- .../MultiTermVectorsShardResponse.java | 46 +++++++++-------- .../action/termvectors/TermVectorsAction.java | 8 ++- .../termvectors/TermVectorsResponse.java | 26 ++++++---- .../TransportShardMultiTermsVectorAction.java | 5 +- .../TransportTermVectorsAction.java | 5 +- .../index/seqno/RetentionLeaseActions.java | 15 +++++- .../get/GetFieldMappingsResponseTests.java | 16 +++--- .../action/explain/ExplainResponseTests.java | 10 ++-- .../termvectors/TermVectorsUnitTests.java | 6 +-- .../xpack/ccr/action/ShardChangesAction.java | 34 ++++++++----- .../PutCcrRestoreSessionAction.java | 4 +- .../ccr/action/ShardChangesResponseTests.java | 10 ++-- 31 files changed, 276 insertions(+), 179 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 217b5fc76588a..cb407978da83e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -101,7 +101,7 @@ private PainlessExecuteAction() { @Override public Response newResponse() { - return new Response(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } public static class Request extends SingleShardRequest implements ToXContentObject { @@ -381,20 +381,22 @@ public static class Response extends ActionResponse implements ToXContentObject private Object result; - Response() {} - Response(Object result) { this.result = result; } + Response(StreamInput in) throws IOException { + super(in); + result = in.readGenericValue(); + } + public Object getResult() { return result; } @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - result = in.readGenericValue(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -469,8 +471,8 @@ public TransportAction(ThreadPool threadPool, TransportService transportService, } @Override - protected Response newResponse() { - return new Response(); + protected Writeable.Reader getResponseReader() { + return Response::new; } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java index 
c75497bd630e5..ed75caff76b50 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java @@ -18,17 +18,57 @@ */ package org.elasticsearch.painless.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; -public class PainlessExecuteResponseTests extends AbstractStreamableTestCase { +import java.io.IOException; + +public class PainlessExecuteResponseTests extends AbstractSerializingTestCase { @Override - protected PainlessExecuteAction.Response createBlankInstance() { - return new PainlessExecuteAction.Response(); + protected Writeable.Reader instanceReader() { + return PainlessExecuteAction.Response::new; } @Override protected PainlessExecuteAction.Response createTestInstance() { - return new PainlessExecuteAction.Response(randomAlphaOfLength(10)); + Object result; + switch (randomIntBetween(0, 2)) { + case 0: + result = randomAlphaOfLength(10); + break; + case 1: + result = randomBoolean(); + break; + case 2: + result = randomDoubleBetween(-10, 10, true); + break; + default: + throw new IllegalStateException("invalid branch"); + } + return new PainlessExecuteAction.Response(result); + } + + @Override + protected PainlessExecuteAction.Response doParseInstance(XContentParser parser) throws IOException { + parser.nextToken(); // START-OBJECT + parser.nextToken(); // FIELD-NAME + XContentParser.Token token = parser.nextToken(); // result value + Object result; + switch (token) { + case VALUE_STRING: + result = parser.text(); + break; + case VALUE_BOOLEAN: + result = parser.booleanValue(); + break; + case VALUE_NUMBER: + result = parser.doubleValue(); + break; + default: + throw new IOException("invalid response"); + } + return new PainlessExecuteAction.Response(result); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index 945c2128bab39..7e6d525cefb93 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -71,7 +71,7 @@ public int hashCode() { return Objects.hash(term, startOffset, endOffset, position, positionLength, attributes, type); } - public AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength, + AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength, String type, Map attributes) { this.term = term; this.position = position; @@ -82,7 +82,7 @@ public AnalyzeToken(String term, int position, int startOffset, int endOffset, i this.attributes = attributes; } - public AnalyzeToken(StreamInput in) throws IOException { + AnalyzeToken(StreamInput in) throws IOException { term = in.readString(); startOffset = in.readInt(); endOffset = in.readInt(); @@ -203,7 +203,6 @@ public void writeTo(StreamOutput out) throws IOException { } private final DetailAnalyzeResponse detail; - private final List tokens; public AnalyzeResponse(List tokens, DetailAnalyzeResponse detail) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 07f445b6fc74a..62d8c0e91da79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -96,11 +96,6 @@ public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterS this.environment = environment; } - @Override - protected AnalyzeResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override protected Writeable.Reader getResponseReader() { return AnalyzeResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java index 5aa19652b676d..d372d8cf93f30 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class GetFieldMappingsAction extends Action { @@ -32,6 +33,11 @@ private GetFieldMappingsAction() { @Override public GetFieldMappingsResponse newResponse() { - return new GetFieldMappingsResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return GetFieldMappingsResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d3200bc1e1d9a..e3be9e6834287 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -92,9 +92,33 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte this.mappings = mappings; } + GetFieldMappingsResponse() { } + GetFieldMappingsResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + Map>> indexMapBuilder = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String index = in.readString(); + int typesSize = in.readVInt(); + Map> typeMapBuilder = new HashMap<>(typesSize); + for (int j = 0; j < typesSize; j++) { + String type = in.readString(); + int fieldSize = in.readVInt(); + Map fieldMapBuilder = new HashMap<>(fieldSize); + for (int k = 0; k < fieldSize; k++) { + fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference())); + } + typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder)); + } + indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder)); + } + mappings = unmodifiableMap(indexMapBuilder); + + } + /** returns the retrieved field mapping. The return map keys are index, type, field (as specified in the request). 
*/ public Map>> mappings() { return mappings; @@ -269,25 +293,7 @@ public int hashCode() { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - Map>> indexMapBuilder = new HashMap<>(size); - for (int i = 0; i < size; i++) { - String index = in.readString(); - int typesSize = in.readVInt(); - Map> typeMapBuilder = new HashMap<>(typesSize); - for (int j = 0; j < typesSize; j++) { - String type = in.readString(); - int fieldSize = in.readVInt(); - Map fieldMapBuilder = new HashMap<>(fieldSize); - for (int k = 0; k < fieldSize; k++) { - fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference())); - } - typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder)); - } - indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder)); - } - mappings = unmodifiableMap(indexMapBuilder); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index c7415391675fc..61a598c361cc9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentHelper; @@ -123,8 +124,8 @@ protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexReq } @Override - protected GetFieldMappingsResponse newResponse() { - return new GetFieldMappingsResponse(); + protected Writeable.Reader getResponseReader() { + return GetFieldMappingsResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java index 13c9d94e7dbc7..ba5618ce7de21 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.explain; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; /** * Entry point for the explain feature. 
@@ -35,6 +36,11 @@ private ExplainAction() { @Override public ExplainResponse newResponse() { - return new ExplainResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return ExplainResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 5cecdf3a8b6b3..8ea7d0f12e341 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -60,6 +60,7 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO private Explanation explanation; private GetResult getResult; + // TODO(talevy): remove dependency on empty constructor from ExplainResponseTests ExplainResponse() { } @@ -80,6 +81,20 @@ public ExplainResponse(String index, String type, String id, boolean exists, Exp this.getResult = getResult; } + public ExplainResponse(StreamInput in) throws IOException { + super(in); + index = in.readString(); + type = in.readString(); + id = in.readString(); + exists = in.readBoolean(); + if (in.readBoolean()) { + explanation = readExplanation(in); + } + if (in.readBoolean()) { + getResult = GetResult.readGetResult(in); + } + } + public String getIndex() { return index; } @@ -123,17 +138,7 @@ public RestStatus status() { @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - type = in.readString(); - id = in.readString(); - exists = in.readBoolean(); - if (in.readBoolean()) { - explanation = readExplanation(in); - } - if (in.readBoolean()) { - getResult = GetResult.readGetResult(in); - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index fe8475322592f..c29da21fe4afe 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -152,8 +153,8 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId } @Override - protected ExplainResponse newResponse() { - return new ExplainResponse(); + protected Writeable.Reader getResponseReader() { + return ExplainResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 01c21544047ed..274633b12a613 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; @@ -114,8 +115,8 @@ protected FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesI } @Override - protected FieldCapabilitiesIndexResponse newResponse() { - return new FieldCapabilitiesIndexResponse(); + protected Writeable.Reader getResponseReader() { + return FieldCapabilitiesIndexResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/GetAction.java b/server/src/main/java/org/elasticsearch/action/get/GetAction.java index a622fd5a8178b..05d1b6c5a4c02 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class GetAction extends Action { @@ -32,6 +33,11 @@ private GetAction() { @Override public GetResponse newResponse() { - return new GetResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return GetResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java index b9383785678b7..3d340d455ceb8 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -48,7 +48,9 @@ public class GetResponse extends ActionResponse implements Iterable responses; - List failures; + final IntArrayList locations; + final List responses; + final List failures; MultiGetShardResponse() { locations = new IntArrayList(); @@ -40,21 +40,8 @@ public class MultiGetShardResponse extends ActionResponse { failures = new ArrayList<>(); } - public void add(int location, GetResponse response) { - locations.add(location); - responses.add(response); - failures.add(null); - } - - public void add(int location, MultiGetResponse.Failure failure) { - locations.add(location); - responses.add(null); - failures.add(failure); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + MultiGetShardResponse(StreamInput in) throws IOException { + super(in); int size = in.readVInt(); locations = new IntArrayList(size); responses = new ArrayList<>(size); @@ -62,9 +49,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { locations.add(in.readVInt()); if (in.readBoolean()) { - GetResponse response = new GetResponse(); - response.readFrom(in); - responses.add(response); + responses.add(new GetResponse(in)); } else { responses.add(null); } @@ -76,6 +61,23 @@ public void readFrom(StreamInput in) throws IOException { } } + public void add(int location, GetResponse response) { + locations.add(location); + responses.add(response); + failures.add(null); + } + + public void add(int location, MultiGetResponse.Failure failure) { + locations.add(location); + responses.add(null); + failures.add(failure); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to 
be replaced by Writeable"); + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -96,4 +98,4 @@ public void writeTo(StreamOutput out) throws IOException { } } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 7bc736b69f38c..65f42835f7374 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.IndexShard; @@ -108,8 +109,8 @@ protected GetResponse shardOperation(GetRequest request, ShardId shardId) { } @Override - protected GetResponse newResponse() { - return new GetResponse(); + protected Writeable.Reader getResponseReader() { + return GetResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 6c48b3b87c59f..9b8ea6bd6cac5 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.IndexShard; @@ -57,8 +58,8 @@ protected boolean isSubAction() { } @Override - protected MultiGetShardResponse newResponse() { - return new MultiGetShardResponse(); + protected Writeable.Reader getResponseReader() { + return MultiGetShardResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 8b0e69bd457c8..3c2e7f9a49e0d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -120,16 +120,7 @@ protected void doRun() throws Exception { }); } - @Deprecated - protected abstract Response newResponse(); - - protected Writeable.Reader getResponseReader() { - return in -> { - Response response = newResponse(); - response.readFrom(in); - return response; - }; - } + protected abstract Writeable.Reader getResponseReader(); protected abstract boolean resolveIndex(Request request); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java index 3e32af7f2c250..14ac59cb132bd 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -105,8 +105,7 @@ public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { failure = MultiTermVectorsResponse.Failure.readFailure(in); } else { - response = new TermVectorsResponse(); - response.readFrom(in); + response = new TermVectorsResponse(in); } } @@ -120,4 +119,4 @@ public void writeTo(StreamOutput out) throws IOException { response.writeTo(out); } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java index 346274c5925b3..2290ee9f52e42 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardResponse.java @@ -30,9 +30,9 @@ public class MultiTermVectorsShardResponse extends ActionResponse { - IntArrayList locations; - List responses; - List failures; + final IntArrayList locations; + final List responses; + final List failures; MultiTermVectorsShardResponse() { locations = new IntArrayList(); @@ -40,21 +40,8 @@ public class MultiTermVectorsShardResponse extends ActionResponse { failures = new ArrayList<>(); } - public void add(int location, TermVectorsResponse response) { - locations.add(location); - responses.add(response); - failures.add(null); - } - - public void add(int location, MultiTermVectorsResponse.Failure failure) { - locations.add(location); - responses.add(null); - failures.add(failure); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + MultiTermVectorsShardResponse(StreamInput in) throws IOException { + super(in); int size = in.readVInt(); locations = new IntArrayList(size); responses = new ArrayList<>(size); @@ -62,9 +49,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { locations.add(in.readVInt()); if (in.readBoolean()) { - TermVectorsResponse response = new TermVectorsResponse(); - response.readFrom(in); - responses.add(response); + responses.add(new TermVectorsResponse(in)); } else { responses.add(null); } @@ -76,6 +61,23 @@ public void readFrom(StreamInput in) throws IOException { } } + public void add(int location, TermVectorsResponse response) { + locations.add(location); + responses.add(response); + failures.add(null); + } + + public void add(int location, MultiTermVectorsResponse.Failure failure) { + locations.add(location); + responses.add(null); + failures.add(failure); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -96,4 +98,4 @@ public void writeTo(StreamOutput out) throws IOException { } } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java index e701efe93ba7a..9b223eed3a3c8 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.termvectors; import 
org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.Writeable; public class TermVectorsAction extends Action { @@ -32,6 +33,11 @@ private TermVectorsAction() { @Override public TermVectorsResponse newResponse() { - return new TermVectorsResponse(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return TermVectorsResponse::new; } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 9159a07e83c03..3d0fb75f8d3eb 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -103,6 +103,20 @@ public TermVectorsResponse(String index, String type, String id) { TermVectorsResponse() { } + TermVectorsResponse(StreamInput in) throws IOException { + index = in.readString(); + type = in.readString(); + id = in.readString(); + docVersion = in.readVLong(); + exists = in.readBoolean(); + artificial = in.readBoolean(); + tookInMillis = in.readVLong(); + if (in.readBoolean()) { + headerRef = in.readBytesReference(); + termVectors = in.readBytesReference(); + } + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); @@ -127,17 +141,7 @@ private boolean hasTermVectors() { @Override public void readFrom(StreamInput in) throws IOException { - index = in.readString(); - type = in.readString(); - id = in.readString(); - docVersion = in.readVLong(); - exists = in.readBoolean(); - artificial = in.readBoolean(); - tookInMillis = in.readVLong(); - if (in.readBoolean()) { - headerRef = in.readBytesReference(); - termVectors = in.readBytesReference(); - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } public Fields getFields() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index e8d6c1bcb4ff6..0292757121eff 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -58,8 +59,8 @@ protected boolean isSubAction() { } @Override - protected MultiTermVectorsShardResponse newResponse() { - return new MultiTermVectorsShardResponse(); + protected Writeable.Reader getResponseReader() { + return MultiTermVectorsShardResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index d87a08a0541aa..0e212ab7cce2f 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -110,8 +111,8 @@ protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId } @Override - protected TermVectorsResponse newResponse() { - return new TermVectorsResponse(); + protected Writeable.Reader getResponseReader() { + return TermVectorsResponse::new; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index dfa985cc5a684..c69a4c6fab042 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -119,8 +120,8 @@ protected Response shardOperation(final T request, final ShardId shardId) { abstract void doRetentionLeaseAction(IndexShard indexShard, T request, ActionListener listener); @Override - protected Response newResponse() { - return new Response(); + protected Writeable.Reader getResponseReader() { + return Response::new; } @Override @@ -169,6 +170,10 @@ void doRetentionLeaseAction(final IndexShard indexShard, final AddRequest reques ActionListener.map(listener, r -> new Response())); } + @Override + protected Writeable.Reader getResponseReader() { + return Response::new; + } } @Override @@ -392,6 +397,12 @@ public RemoveRequest(final ShardId shardId, final String id) { public static class Response extends ActionResponse { + public Response() { + } + + Response(StreamInput in) throws IOException { + super(in); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java index 2b8db458eb82f..472b4ddb4a3d1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java @@ -23,11 +23,12 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.Collections; @@ -38,7 +39,7 @@ import static 
org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; import static org.hamcrest.CoreMatchers.equalTo; -public class GetFieldMappingsResponseTests extends AbstractStreamableXContentTestCase { +public class GetFieldMappingsResponseTests extends AbstractSerializingTestCase { public void testManualSerialization() throws IOException { Map>> mappings = new HashMap<>(); @@ -48,9 +49,8 @@ public void testManualSerialization() throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { response.writeTo(out); - GetFieldMappingsResponse serialized = new GetFieldMappingsResponse(); try (StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes)) { - serialized.readFrom(in); + GetFieldMappingsResponse serialized = new GetFieldMappingsResponse(in); FieldMappingMetaData metaData = serialized.fieldMappings("index", "type", "field"); assertNotNull(metaData); assertEquals(new BytesArray("{}"), metaData.getSource()); @@ -106,13 +106,13 @@ protected GetFieldMappingsResponse doParseInstance(XContentParser parser) throws } @Override - protected GetFieldMappingsResponse createBlankInstance() { - return new GetFieldMappingsResponse(); + protected GetFieldMappingsResponse createTestInstance() { + return new GetFieldMappingsResponse(randomMapping()); } @Override - protected GetFieldMappingsResponse createTestInstance() { - return new GetFieldMappingsResponse(randomMapping()); + protected Writeable.Reader instanceReader() { + return GetFieldMappingsResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java index 2a04a97667722..9f1ee08844b66 100644 --- a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java @@ -23,13 +23,14 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.RandomObjects; import java.io.IOException; @@ -42,15 +43,16 @@ import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; -public class ExplainResponseTests extends AbstractStreamableXContentTestCase { +public class ExplainResponseTests extends AbstractSerializingTestCase { + @Override protected ExplainResponse doParseInstance(XContentParser parser) throws IOException { return ExplainResponse.fromXContent(parser, randomBoolean()); } @Override - protected ExplainResponse createBlankInstance() { - return new ExplainResponse(); + protected Writeable.Reader instanceReader() { + return ExplainResponse::new; } @Override diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 0cd9d3130f176..8ab452950a87a 100644 --- 
a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -77,8 +77,7 @@ public void testStreamResponse() throws Exception { // read ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer); - TermVectorsResponse inResponse = new TermVectorsResponse("a", "b", "c"); - inResponse.readFrom(esBuffer); + TermVectorsResponse inResponse = new TermVectorsResponse(esBuffer); // see if correct checkIfStandardTermVector(inResponse); @@ -93,8 +92,7 @@ public void testStreamResponse() throws Exception { // read esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); esBuffer = new InputStreamStreamInput(esInBuffer); - inResponse = new TermVectorsResponse("a", "b", "c"); - inResponse.readFrom(esBuffer); + inResponse = new TermVectorsResponse(esBuffer); assertTrue(inResponse.isExists()); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 0ef14115eab32..33b8a274431d2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -61,7 +62,12 @@ private ShardChangesAction() { @Override public Response newResponse() { - return new Response(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return Response::new; } public static class Request extends SingleShardRequest { @@ -246,6 +252,17 @@ public long getTookInMillis() { Response() { } + Response(StreamInput in) throws IOException { + super(in); + mappingVersion = in.readVLong(); + settingsVersion = in.readVLong(); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); + maxSeqNoOfUpdatesOrDeletes = in.readZLong(); + operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); + tookInMillis = in.readVLong(); + } + Response( final long mappingVersion, final long settingsVersion, @@ -265,15 +282,8 @@ public long getTookInMillis() { } @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); - mappingVersion = in.readVLong(); - settingsVersion = in.readVLong(); - globalCheckpoint = in.readZLong(); - maxSeqNo = in.readZLong(); - maxSeqNoOfUpdatesOrDeletes = in.readZLong(); - operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); - tookInMillis = in.readVLong(); + public void readFrom(final StreamInput in) { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -459,8 +469,8 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { } @Override - protected Response newResponse() { - return new Response(); + protected Writeable.Reader getResponseReader() { + return 
Response::new; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java index 393548225a0c2..91ec057ac4eb6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java @@ -77,8 +77,8 @@ protected PutCcrRestoreSessionResponse shardOperation(PutCcrRestoreSessionReques } @Override - protected PutCcrRestoreSessionResponse newResponse() { - return new PutCcrRestoreSessionResponse(); + protected Writeable.Reader getResponseReader() { + return PutCcrRestoreSessionResponse::new; } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java index 0e48fc8e57ca3..a5b28caf9dfb2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java @@ -5,10 +5,11 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; -public class ShardChangesResponseTests extends AbstractStreamableTestCase { +public class ShardChangesResponseTests extends AbstractWireSerializingTestCase { @Override protected ShardChangesAction.Response createTestInstance() { @@ -34,8 +35,7 @@ protected ShardChangesAction.Response createTestInstance() { } @Override - protected ShardChangesAction.Response createBlankInstance() { - return new ShardChangesAction.Response(); + protected Writeable.Reader instanceReader() { + return ShardChangesAction.Response::new; } - } From 368e5a1194c2d2f762c117dd84397bb65f835dec Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 9 May 2019 15:35:37 -0500 Subject: [PATCH 055/321] fix unlikely bug that can prevent Watcher from restarting (#42030) The bug fixed here is unlikely to happen. It requires ES to be started with ILM disabled, Watcher enabled, and Watcher explicitly stopped and restarted. Due to template validation, Watcher does not fully start and can be left in a partially started state. This is an unlikely scenario outside of the testing framework. Note - this bug was introduced while the test that would have caught it was muted. The test remains muted since the underlying cause of the random failures has not been identified. When this test is un-muted, it will pass.
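For illustration, the essence of the fix is that the history-template check becomes an either/or over the ILM and no-ILM template names, since only one of the two is installed depending on whether ILM is enabled. A simplified sketch of the relaxed check (operating on a plain set of installed template names instead of the real ClusterState, with the template names written out rather than taken from the registry constants):

    import java.util.Set;

    final class WatcherTemplateValidationSketch {
        // Hypothetical stand-in for WatcherIndexTemplateRegistry#validate: the
        // history requirement is satisfied by either the ILM or the no-ILM
        // history template, while the triggered and watches templates remain
        // mandatory.
        static boolean validate(Set<String> installedTemplates) {
            boolean historyPresent = installedTemplates.contains(".watch-history-9")
                    || installedTemplates.contains(".watch-history-no-ilm-9");
            return historyPresent
                    && installedTemplates.contains(".triggered_watches")
                    && installedTemplates.contains(".watches");
        }
    }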
--- .../xpack/watcher/support/WatcherIndexTemplateRegistry.java | 3 ++- .../smoketest/SmokeTestWatcherWithSecurityIT.java | 2 +- .../xpack/test/rest/XPackRestTestConstants.java | 5 +---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java index 9f5027f7a0f7a..be844d5d1e428 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java @@ -79,7 +79,8 @@ protected String getOrigin() { } public static boolean validate(ClusterState state) { - return state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME) && + return (state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME) || + state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME_NO_ILM)) && state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME) && state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME); } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 902115e82925d..bf53dfa83103e 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -83,7 +83,7 @@ public void startWatcher() throws Exception { }); assertBusy(() -> { - for (String template : XPackRestTestConstants.TEMPLATE_NAMES) { + for (String template : XPackRestTestConstants.TEMPLATE_NAMES_NO_ILM) { assertOK(adminClient().performRequest(new Request("HEAD", "_template/" + template))); } }); diff --git a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java index 1a6a59fbc696b..bfdf051a29235 100644 --- a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java +++ b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java @@ -11,13 +11,10 @@ public final class XPackRestTestConstants { // Watcher constants: public static final String INDEX_TEMPLATE_VERSION = "9"; - public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION; public static final String HISTORY_TEMPLATE_NAME_NO_ILM = ".watch-history-no-ilm-" + INDEX_TEMPLATE_VERSION; public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches"; public static final String WATCHES_TEMPLATE_NAME = ".watches"; - public static final String[] TEMPLATE_NAMES = new String[] { - HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME - }; + public static final String[] TEMPLATE_NAMES_NO_ILM = new String[] { HISTORY_TEMPLATE_NAME_NO_ILM, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME }; From 9f39879a49265d6ca8fcac47a991224e7da151e6 Mon Sep 17 00:00:00 2001 From: 
Gordon Brown Date: Thu, 9 May 2019 14:37:55 -0600 Subject: [PATCH 056/321] Remove toStepKeys from LifecycleAction (#41775) The `toStepKeys()` method was only called in its own test case. The real list of StepKeys that's used in action execution is generated from the list of actual step objects returned by `toSteps()`. This commit removes that method. --- .../core/indexlifecycle/AllocateAction.java | 7 ------- .../xpack/core/indexlifecycle/DeleteAction.java | 9 --------- .../core/indexlifecycle/ForceMergeAction.java | 8 -------- .../xpack/core/indexlifecycle/FreezeAction.java | 6 ------ .../core/indexlifecycle/LifecycleAction.java | 10 ---------- .../core/indexlifecycle/ReadOnlyAction.java | 6 ------ .../core/indexlifecycle/RolloverAction.java | 9 --------- .../core/indexlifecycle/SetPriorityAction.java | 5 ----- .../xpack/core/indexlifecycle/ShrinkAction.java | 16 ---------------- .../core/indexlifecycle/UnfollowAction.java | 13 ------------- .../indexlifecycle/AbstractActionTestCase.java | 16 ---------------- .../xpack/core/indexlifecycle/MockAction.java | 8 +------- 12 files changed, 1 insertion(+), 112 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java index 7843fa7d86e0c..cbfec7b91029f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java @@ -149,13 +149,6 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { return Arrays.asList(allocateStep, routedCheckStep); } - @Override - public List toStepKeys(String phase) { - StepKey allocateKey = new StepKey(phase, NAME, NAME); - StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME); - return Arrays.asList(allocateKey, allocationRoutedKey); - } - @Override public int hashCode() { return Objects.hash(numberOfReplicas, include, exclude, require); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java index b61534e497067..ae8eaef670916 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.io.IOException; import java.util.Arrays; @@ -67,14 +66,6 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) return Arrays.asList(waitForNoFollowersStep, deleteStep); } - @Override - public List toStepKeys(String phase) { - Step.StepKey waitForNoFollowerStepKey = new Step.StepKey(phase, NAME, WaitForNoFollowersStep.NAME); - Step.StepKey deleteStepKey = new Step.StepKey(phase, NAME, DeleteStep.NAME); - - return Arrays.asList(waitForNoFollowerStepKey, deleteStepKey); - } - @Override public int hashCode() { return 1; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java index 2c4508a8355f0..ace29f6b465ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java @@ -98,14 +98,6 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) return Arrays.asList(readOnlyStep, forceMergeStep, segmentCountStep); } - @Override - public List toStepKeys(String phase) { - StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); - StepKey forceMergeKey = new StepKey(phase, NAME, ForceMergeStep.NAME); - StepKey countKey = new StepKey(phase, NAME, SegmentCountStep.NAME); - return Arrays.asList(readOnlyKey, forceMergeKey, countKey); - } - @Override public int hashCode() { return Objects.hash(maxNumSegments); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java index 63dbedadd4fe4..7cffaed80917d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/FreezeAction.java @@ -64,12 +64,6 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { return Arrays.asList(freezeStep); } - @Override - public List toStepKeys(String phase) { - StepKey freezeStepKey = new StepKey(phase, NAME, FreezeStep.NAME); - return Arrays.asList(freezeStepKey); - } - @Override public int hashCode() { return 1; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java index 3e84813274d83..d6ef78496bb4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.util.List; @@ -30,15 +29,6 @@ public interface LifecycleAction extends ToXContentObject, NamedWriteable { */ List toSteps(Client client, String phase, @Nullable Step.StepKey nextStepKey); - /** - * - * @param phase - * the name of the phase this action is being executed within - * @return the {@link StepKey}s for the steps which will be executed in this - * action - */ - List toStepKeys(String phase); - /** * @return true if this action is considered safe. 
An action is not safe if * it will produce unwanted side effects or will get stuck when the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java index e338d75a98f82..ea34886508030 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.io.IOException; import java.util.Collections; @@ -65,11 +64,6 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build(); return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, readOnlySettings)); } - - @Override - public List toStepKeys(String phase) { - return Collections.singletonList(new Step.StepKey(phase, NAME, NAME)); - } @Override public int hashCode() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java index 25346fefa3149..280f8561c35f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java @@ -151,15 +151,6 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) return Arrays.asList(waitForRolloverReadyStep, rolloverStep, updateDateStep, setIndexingCompleteStep); } - @Override - public List toStepKeys(String phase) { - StepKey rolloverReadyStepKey = new StepKey(phase, NAME, WaitForRolloverReadyStep.NAME); - StepKey rolloverStepKey = new StepKey(phase, NAME, RolloverStep.NAME); - StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME); - StepKey setIndexingCompleteStepKey = new StepKey(phase, NAME, INDEXING_COMPLETE_STEP_NAME); - return Arrays.asList(rolloverReadyStepKey, rolloverStepKey, updateDateStepKey, setIndexingCompleteStepKey); - } - @Override public int hashCode() { return Objects.hash(maxSize, maxAge, maxDocs); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java index 507da4613e22a..1bc09e7f42ebf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetPriorityAction.java @@ -90,11 +90,6 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, indexPriority)); } - @Override - public List toStepKeys(String phase) { - return Collections.singletonList(new StepKey(phase, NAME, NAME)); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java index c1b3fb2422965..5fc4b06ec57b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java @@ -111,22 +111,6 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) shrink, allocated, copyMetadata, aliasSwapAndDelete, waitOnShrinkTakeover); } - @Override - public List toStepKeys(String phase) { - StepKey conditionalSkipKey = new StepKey(phase, NAME, BranchingStep.NAME); - StepKey waitForNoFollowerStepKey = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME); - StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); - StepKey setSingleNodeKey = new StepKey(phase, NAME, SetSingleNodeAllocateStep.NAME); - StepKey checkShrinkReadyKey = new StepKey(phase, NAME, CheckShrinkReadyStep.NAME); - StepKey shrinkKey = new StepKey(phase, NAME, ShrinkStep.NAME); - StepKey enoughShardsKey = new StepKey(phase, NAME, ShrunkShardsAllocatedStep.NAME); - StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME); - StepKey aliasKey = new StepKey(phase, NAME, ShrinkSetAliasStep.NAME); - StepKey isShrunkIndexKey = new StepKey(phase, NAME, ShrunkenIndexCheckStep.NAME); - return Arrays.asList(conditionalSkipKey, waitForNoFollowerStepKey, readOnlyKey, setSingleNodeKey, checkShrinkReadyKey, shrinkKey, - enoughShardsKey, copyMetadataKey, aliasKey, isShrunkIndexKey); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java index 20a0fb75b9daa..d8d855b0af2ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowAction.java @@ -54,19 +54,6 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { return Arrays.asList(step1, step2, step3, step4, step5, step6, step7); } - @Override - public List toStepKeys(String phase) { - StepKey indexingCompleteStep = new StepKey(phase, NAME, WaitForIndexingCompleteStep.NAME); - StepKey waitForFollowShardTasksStep = new StepKey(phase, NAME, WaitForFollowShardTasksStep.NAME); - StepKey pauseFollowerIndexStep = new StepKey(phase, NAME, PauseFollowerIndexStep.NAME); - StepKey closeFollowerIndexStep = new StepKey(phase, NAME, CloseFollowerIndexStep.NAME); - StepKey unfollowIndexStep = new StepKey(phase, NAME, UnfollowFollowIndexStep.NAME); - StepKey openFollowerIndexStep = new StepKey(phase, NAME, OpenFollowerIndexStep.NAME); - StepKey waitForYellowStep = new StepKey(phase, NAME, WaitForYellowStep.NAME); - return Arrays.asList(indexingCompleteStep, waitForFollowShardTasksStep, pauseFollowerIndexStep, - closeFollowerIndexStep, unfollowIndexStep, openFollowerIndexStep, waitForYellowStep); - } - @Override public boolean isSafeAction() { // There are no settings to change, so therefor this action should be safe: diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java index bed04a7cf5425..ab35221afec0b 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java @@ -7,10 +7,6 @@ package org.elasticsearch.xpack.core.indexlifecycle; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; - -import java.util.List; -import java.util.stream.Collectors; public abstract class AbstractActionTestCase extends AbstractSerializingTestCase { @@ -25,16 +21,4 @@ public final void testIsSafeAction() { assertEquals(isSafeAction(), action.isSafeAction()); } - public void testToStepKeys() { - T action = createTestInstance(); - String phase = randomAlphaOfLengthBetween(1, 10); - StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10)); - List steps = action.toSteps(null, phase, nextStepKey); - assertNotNull(steps); - List stepKeys = action.toStepKeys(phase); - assertNotNull(stepKeys); - List expectedStepKeys = steps.stream().map(Step::getKey).collect(Collectors.toList()); - assertEquals(expectedStepKeys, stepKeys); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java index 30eabac562606..9ca6dc17d3e32 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import java.io.IOException; import java.util.ArrayList; @@ -75,11 +74,6 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) return new ArrayList<>(steps); } - @Override - public List toStepKeys(String phase) { - return steps.stream().map(Step::getKey).collect(Collectors.toList()); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(steps.stream().map(MockStep::new).collect(Collectors.toList())); @@ -103,4 +97,4 @@ public boolean equals(Object obj) { return Objects.equals(steps, other.steps) && Objects.equals(safe, other.safe); } -} \ No newline at end of file +} From d8417dcb8c4322003078b8daf22fa82b9c230949 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 9 May 2019 23:02:02 +0200 Subject: [PATCH 057/321] Fix Race in Closing IndicesService.CacheCleaner (#42016) * When `closed` becomes true while the management pool is already shut down, we run into an unhandled `EsRejectedExecutionException` that fails tests * Found this while trying to reproduce #32506 * Running the IndexStatsIT in a loop is a way of reproducing this
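The fix below guards the reschedule with a double-check: a rejection is swallowed only when it lost the race with close, and rethrown otherwise. A minimal standalone sketch of that pattern, assuming a plain ScheduledExecutorService and a local flag in place of the real ThreadPool and IndicesService state (all names here are illustrative):

    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    final class RescheduleOnCloseSketch {
        static final AtomicBoolean closed = new AtomicBoolean();
        static final ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();

        static void runAndReschedule() {
            // ... the periodic cleanup work itself would run here ...
            if (closed.get() == false) {
                try {
                    pool.schedule(RescheduleOnCloseSketch::runAndReschedule, 1, TimeUnit.MINUTES);
                } catch (RejectedExecutionException e) {
                    // The pool was shut down between the closed check and the
                    // schedule() call; only swallow the rejection if close
                    // really happened, otherwise it is a genuine failure.
                    if (closed.get() == false) {
                        throw e;
                    }
                }
            }
        }
    }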
--- .../java/org/elasticsearch/indices/IndicesService.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index a5f6a2e9f1190..be5e1cae4fa8e 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -66,6 +66,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -1219,7 +1220,13 @@ public void run() { } // Reschedule itself to run again if not closed if (closed.get() == false) { - threadPool.schedule(this, interval, ThreadPool.Names.SAME); + try { + threadPool.schedule(this, interval, ThreadPool.Names.SAME); + } catch (EsRejectedExecutionException e) { + if (closed.get() == false) { + throw e; + } + } } } From 1a27eb9a00159e92f493d3551d4b3dbd3176d726 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 9 May 2019 16:11:02 -0500 Subject: [PATCH 058/321] fix org.elasticsearch.xpack.watcher.test.integration.RejectedExecutionTests (#41777) This commit un-mutes org.elasticsearch.xpack.watcher.test.integration.RejectedExecutionTests which was failing intermittently due to a logic bug. It is not possible to use the real Watcher scheduler (which is needed for this test) and reliably count the .triggered_watches documents, since the current count of documents in the .triggered_watches index is based on the timing of the scheduler and the ability to delete based on the Watcher and Write thread pools. This commit simply removes the .triggered_watches check and relies solely on the .watcher-history index as an indication of the operations that occur when the Watcher thread pool is rejecting executions. closes #41734 --- .../test/integration/RejectedExecutionTests.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index f6c46f6c68f71..9492e50048d4e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; -import java.util.concurrent.TimeUnit; - import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; @@ -25,7 +23,6 @@ import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RejectedExecutionTests extends AbstractWatcherIntegrationTestCase { @@ -36,8 +33,7 @@ protected boolean timeWarped() { return false; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41734") - public void testHistoryAndTriggeredOnRejection() throws Exception { + public void testHistoryOnRejection() throws Exception { WatcherClient watcherClient = watcherClient(); createIndex("idx"); client().prepareIndex("idx", "_doc").setSource("field", "a").get();
"_doc").setSource("field", "a").get(); @@ -56,11 +52,7 @@ public void testHistoryAndTriggeredOnRejection() throws Exception { flushAndRefresh(".watcher-history-*"); SearchResponse searchResponse = client().prepareSearch(".watcher-history-*").get(); assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)); - }, 10, TimeUnit.SECONDS); - - flushAndRefresh(".triggered_watches"); - SearchResponse searchResponse = client().prepareSearch(".triggered_watches").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + }); } @Override From 2445a933915f420c7f51e8505afa0a7978ce6b0f Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 10 May 2019 00:26:56 +0300 Subject: [PATCH 059/321] SQL: Add support for FROZEN indices (#41558) Allow querying of FROZEN indices both through dedicated SQL grammar extension: > SELECT field FROM FROZEN index and also through driver configuration parameter, namely: > index.include.frozen: true/false Fix #39390 Fix #39377 --- .../xpack/sql/jdbc/JdbcConfiguration.java | 12 +- .../xpack/sql/jdbc/JdbcHttpClient.java | 3 +- .../xpack/sql/qa/security/CliSecurityIT.java | 3 +- .../sql/qa/security/RestSqlSecurityIT.java | 2 + .../sql/qa/single_node/JdbcCsvSpecIT.java | 4 +- .../qa/single_node/JdbcFrozenCsvSpecIT.java | 42 + .../xpack/sql/qa/jdbc/CsvSpecTestCase.java | 11 +- .../xpack/sql/qa/jdbc/DataLoader.java | 9 + .../xpack/sql/qa/jdbc/ShowTablesTestCase.java | 2 +- .../sql/qa/src/main/resources/alias.csv-spec | 14 +- .../qa/src/main/resources/command.csv-spec | 36 +- .../qa/src/main/resources/docs/docs.csv-spec | 50 +- .../main/resources/setup_mock_show_tables.sql | 3 +- .../src/main/resources/slow/frozen.csv-spec | 59 + .../sql/qa/src/main/resources/slow/readme.txt | 3 + .../xpack/sql/action/SqlQueryRequest.java | 24 +- .../sql/action/SqlQueryRequestBuilder.java | 7 +- .../xpack/sql/action/SqlTranslateRequest.java | 2 +- .../sql/action/SqlQueryRequestTests.java | 4 +- .../xpack/sql/client/HttpClient.java | 5 +- .../xpack/sql/proto/Protocol.java | 1 + .../xpack/sql/proto/SqlQueryRequest.java | 20 +- x-pack/plugin/sql/src/main/antlr/SqlBase.g4 | 10 +- .../plugin/sql/src/main/antlr/SqlBase.tokens | 356 ++-- .../sql/src/main/antlr/SqlBaseLexer.tokens | 354 ++-- .../xpack/sql/analysis/analyzer/Analyzer.java | 6 +- .../sql/analysis/analyzer/PreAnalyzer.java | 9 +- .../sql/analysis/analyzer/TableInfo.java | 28 + .../sql/analysis/index/IndexResolver.java | 135 +- .../search/CompositeAggregationCursor.java | 17 +- .../execution/search/CompositeAggsRowSet.java | 5 +- .../xpack/sql/execution/search/Querier.java | 12 +- .../search/SchemaCompositeAggsRowSet.java | 3 +- .../xpack/sql/parser/CommandBuilder.java | 25 +- .../xpack/sql/parser/LogicalPlanBuilder.java | 2 +- .../xpack/sql/parser/SqlBaseLexer.java | 873 ++++---- .../xpack/sql/parser/SqlBaseParser.java | 1762 +++++++++-------- .../xpack/sql/plan/logical/EsRelation.java | 17 +- .../sql/plan/logical/UnresolvedRelation.java | 15 +- .../sql/plan/logical/command/ShowColumns.java | 15 +- .../sql/plan/logical/command/ShowTables.java | 28 +- .../plan/logical/command/sys/SysColumns.java | 9 +- .../plan/logical/command/sys/SysTables.java | 24 +- .../xpack/sql/planner/Mapper.java | 10 +- .../xpack/sql/planner/QueryFolder.java | 6 +- .../xpack/sql/plugin/RestSqlQueryAction.java | 1 - .../plugin/TransportSqlClearCursorAction.java | 3 +- .../sql/plugin/TransportSqlQueryAction.java | 3 +- .../plugin/TransportSqlTranslateAction.java | 3 +- .../querydsl/container/QueryContainer.java | 61 +- 
--- .../xpack/sql/jdbc/JdbcConfiguration.java | 12 +- .../xpack/sql/jdbc/JdbcHttpClient.java | 3 +- .../xpack/sql/qa/security/CliSecurityIT.java | 3 +- .../sql/qa/security/RestSqlSecurityIT.java | 2 + .../sql/qa/single_node/JdbcCsvSpecIT.java | 4 +- .../qa/single_node/JdbcFrozenCsvSpecIT.java | 42 + .../xpack/sql/qa/jdbc/CsvSpecTestCase.java | 11 +- .../xpack/sql/qa/jdbc/DataLoader.java | 9 + .../xpack/sql/qa/jdbc/ShowTablesTestCase.java | 2 +- .../sql/qa/src/main/resources/alias.csv-spec | 14 +- .../qa/src/main/resources/command.csv-spec | 36 +- .../qa/src/main/resources/docs/docs.csv-spec | 50 +- .../main/resources/setup_mock_show_tables.sql | 3 +- .../src/main/resources/slow/frozen.csv-spec | 59 + .../sql/qa/src/main/resources/slow/readme.txt | 3 + .../xpack/sql/action/SqlQueryRequest.java | 24 +- .../sql/action/SqlQueryRequestBuilder.java | 7 +- .../xpack/sql/action/SqlTranslateRequest.java | 2 +- .../sql/action/SqlQueryRequestTests.java | 4 +- .../xpack/sql/client/HttpClient.java | 5 +- .../xpack/sql/proto/Protocol.java | 1 + .../xpack/sql/proto/SqlQueryRequest.java | 20 +- x-pack/plugin/sql/src/main/antlr/SqlBase.g4 | 10 +- .../plugin/sql/src/main/antlr/SqlBase.tokens | 356 ++-- .../sql/src/main/antlr/SqlBaseLexer.tokens | 354 ++-- .../xpack/sql/analysis/analyzer/Analyzer.java | 6 +- .../sql/analysis/analyzer/PreAnalyzer.java | 9 +- .../sql/analysis/analyzer/TableInfo.java | 28 + .../sql/analysis/index/IndexResolver.java | 135 +- .../search/CompositeAggregationCursor.java | 17 +- .../execution/search/CompositeAggsRowSet.java | 5 +- .../xpack/sql/execution/search/Querier.java | 12 +- .../search/SchemaCompositeAggsRowSet.java | 3 +- .../xpack/sql/parser/CommandBuilder.java | 25 +- .../xpack/sql/parser/LogicalPlanBuilder.java | 2 +- .../xpack/sql/parser/SqlBaseLexer.java | 873 ++++---- .../xpack/sql/parser/SqlBaseParser.java | 1762 +++++++++-------- .../xpack/sql/plan/logical/EsRelation.java | 17 +- .../sql/plan/logical/UnresolvedRelation.java | 15 +- .../sql/plan/logical/command/ShowColumns.java | 15 +- .../sql/plan/logical/command/ShowTables.java | 28 +- .../plan/logical/command/sys/SysColumns.java | 9 +- .../plan/logical/command/sys/SysTables.java | 24 +- .../xpack/sql/planner/Mapper.java | 10 +- .../xpack/sql/planner/QueryFolder.java | 6 +- .../xpack/sql/plugin/RestSqlQueryAction.java | 1 - .../plugin/TransportSqlClearCursorAction.java | 3 +- .../sql/plugin/TransportSqlQueryAction.java | 3 +- .../plugin/TransportSqlTranslateAction.java | 3 +- .../querydsl/container/QueryContainer.java | 61 +- .../xpack/sql/session/Configuration.java | 11 +- .../xpack/sql/session/SqlSession.java | 7 +- .../elasticsearch/xpack/sql/TestUtils.java | 9 +- .../analysis/analyzer/PreAnalyzerTests.java | 24 +- .../CompositeAggregationCursorTests.java | 4 +- .../scalar/DatabaseFunctionTests.java | 4 +- .../function/scalar/UserFunctionTests.java | 3 +- .../xpack/sql/optimizer/OptimizerTests.java | 2 +- .../plan/logical/UnresolvedRelationTests.java | 10 +- .../logical/command/sys/SysColumnsTests.java | 7 +- .../logical/command/sys/SysTablesTests.java | 28 +- 61 files changed, 2311 insertions(+), 1906 deletions(-) create mode 100644 x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java create mode 100644 x-pack/plugin/sql/qa/src/main/resources/slow/frozen.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/slow/readme.txt create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/TableInfo.java diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java index 7a9154c10ac4e..1c216d8dba7c7 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java @@ -57,10 +57,13 @@ class JdbcConfiguration extends ConnectionConfiguration { static final String FIELD_MULTI_VALUE_LENIENCY = "field.multi.value.leniency"; static final String FIELD_MULTI_VALUE_LENIENCY_DEFAULT = "true"; + static final String INDEX_INCLUDE_FROZEN = "index.include.frozen"; + static final String INDEX_INCLUDE_FROZEN_DEFAULT = "false"; + // options that don't change at runtime private static final Set OPTION_NAMES = new LinkedHashSet<>( - Arrays.asList(TIME_ZONE, FIELD_MULTI_VALUE_LENIENCY, DEBUG, DEBUG_OUTPUT)); + Arrays.asList(TIME_ZONE, FIELD_MULTI_VALUE_LENIENCY, INDEX_INCLUDE_FROZEN, DEBUG, DEBUG_OUTPUT)); static { // trigger version initialization @@ -77,6 +80,7 @@ class JdbcConfiguration extends ConnectionConfiguration { // mutable ones private ZoneId zoneId; private boolean fieldMultiValueLeniency; + private boolean includeFrozen; public static JdbcConfiguration create(String u, Properties props, int loginTimeoutSeconds) throws JdbcSQLException { URI uri = parseUrl(u); @@ -159,6 +163,8 @@ private JdbcConfiguration(URI baseURI, String u, Properties props) throws JdbcSQ s -> TimeZone.getTimeZone(s).toZoneId().normalized()); this.fieldMultiValueLeniency = parseValue(FIELD_MULTI_VALUE_LENIENCY, props.getProperty(FIELD_MULTI_VALUE_LENIENCY, FIELD_MULTI_VALUE_LENIENCY_DEFAULT), Boolean::parseBoolean); + this.includeFrozen = parseValue(INDEX_INCLUDE_FROZEN, props.getProperty(INDEX_INCLUDE_FROZEN, INDEX_INCLUDE_FROZEN_DEFAULT), + Boolean::parseBoolean); } @Override @@ -186,6 +192,10 @@ public boolean fieldMultiValueLeniency() { return fieldMultiValueLeniency; } + public boolean indexIncludeFrozen() { + return includeFrozen; + } + public static boolean canAccept(String url) { return (StringUtils.hasText(url) && url.trim().startsWith(JdbcConfiguration.URL_PREFIX)); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index 8bf3811ecb742..55d96afaf7105 100644 ---
a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -63,7 +63,8 @@ Cursor query(String sql, List params, RequestMeta meta) thro Boolean.FALSE, null, new RequestInfo(Mode.JDBC), - conCfg.fieldMultiValueLeniency()); + conCfg.fieldMultiValueLeniency(), + conCfg.indexIncludeFrozen()); SqlQueryResponse response = httpClient.query(sqlRequest); return new DefaultCursor(this, response.cursor(), toJdbcColumnInfo(response.columns()), response.rows(), meta); } diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java index 2995cc4e57750..ce665f9d1b95d 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java @@ -145,7 +145,8 @@ public void expectShowTables(List tables, String user) throws Exception String tablesOutput = cli.command("SHOW TABLES"); assertThat(tablesOutput, containsString("name")); assertThat(tablesOutput, containsString("type")); - assertEquals("---------------+---------------", cli.readLine()); + assertThat(tablesOutput, containsString("kind")); + assertEquals("---------------+---------------+---------------", cli.readLine()); for (String table : tables) { String line = null; /* diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java index 6a4a2662810e3..0829eed2da32b 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java @@ -122,6 +122,7 @@ public void expectShowTables(List tables, String user) throws Exception List columns = new ArrayList<>(); columns.add(columnInfo(mode, "name", "keyword", JDBCType.VARCHAR, 32766)); columns.add(columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 32766)); + columns.add(columnInfo(mode, "kind", "keyword", JDBCType.VARCHAR, 32766)); Map expected = new HashMap<>(); expected.put("columns", columns); List> rows = new ArrayList<>(); @@ -129,6 +130,7 @@ public void expectShowTables(List tables, String user) throws Exception List fields = new ArrayList<>(); fields.add(table); fields.add("BASE TABLE"); + fields.add("INDEX"); rows.add(fields); } expected.put("rows", rows); diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java index f742b1304a79e..428bc1c21ef8f 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java @@ -17,12 +17,12 @@ public class JdbcCsvSpecIT extends CsvSpecTestCase { - @ParametersFactory(argumentFormatting = PARAM_FORMATTING) public static List readScriptSpec() throws Exception { List list = new ArrayList<>(); list.addAll(CsvSpecTestCase.readScriptSpec()); - return 
readScriptSpec("/single-node-only/command-sys.csv-spec", specParser()); + list.addAll(readScriptSpec("/single-node-only/command-sys.csv-spec", specParser())); + return list; } public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java new file mode 100644 index 0000000000000..b8506cc5e1e86 --- /dev/null +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFrozenCsvSpecIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; + +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + +public class JdbcFrozenCsvSpecIT extends CsvSpecTestCase { + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + return readScriptSpec("/slow/frozen.csv-spec", specParser()); + } + + @Override + protected Properties connectionProperties() { + Properties props = new Properties(super.connectionProperties()); + String timeout = String.valueOf(TimeUnit.MINUTES.toMillis(5)); + props.setProperty("connect.timeout", timeout); + props.setProperty("network.timeout", timeout); + props.setProperty("query.timeout", timeout); + props.setProperty("page.timeout", timeout); + + return props; + } + + + public JdbcFrozenCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java index 40ce2416c249c..00bf9030dbd7a 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; @@ -42,14 +43,8 @@ public CsvSpecTestCase(String fileName, String groupName, String testName, Integ protected final void doTest() throws Throwable { // Run the time tests always in UTC // TODO: https://github.com/elastic/elasticsearch/issues/40779 - if ("time".equals(groupName)) { - try (Connection csv = csvConnection(testCase); Connection es = esJdbc(connectionProperties())) { - executeAndAssert(csv, es); - } - } else { - try (Connection csv = csvConnection(testCase); Connection es = esJdbc()) { - executeAndAssert(csv, es); - } + try (Connection csv = csvConnection(testCase); 
Connection es = esJdbc()) { + executeAndAssert(csv, es); } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index b12203294c158..fe2e84e962fd3 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -49,6 +49,9 @@ protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception { loadLogsDatasetIntoEs(client, "logs", "logs"); makeAlias(client, "test_alias", "test_emp", "test_emp_copy"); makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); + // frozen index + loadEmpDatasetIntoEs(client, "frozen_emp", "employees"); + freeze(client, "frozen_emp"); } public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { @@ -292,6 +295,12 @@ protected static void makeAlias(RestClient client, String aliasName, String... i } } + protected static void freeze(RestClient client, String... indices) throws Exception { + for (String index : indices) { + client.performRequest(new Request("POST", "/" + index + "/_freeze")); + } + } + private static void csvToLines(String name, CheckedBiConsumer, List, Exception> consumeLine) throws Exception { String location = "/" + name + ".csv"; URL dataSet = SqlSpecTestCase.class.getResource(location); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java index ab2ddc9a7fe24..4a6882685a928 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java @@ -30,7 +30,7 @@ public void testShowTablesWithManyIndices() throws Exception { for (int i = 0; i < indices; i++) { String index = String.format(Locale.ROOT, "test%02d", i); index(index, builder -> builder.field("name", "bob")); - h2.createStatement().executeUpdate("INSERT INTO mock VALUES ('" + index + "', 'BASE TABLE');"); + h2.createStatement().executeUpdate("INSERT INTO mock VALUES ('" + index + "', 'BASE TABLE', 'INDEX');"); } ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock ORDER BY name"); diff --git a/x-pack/plugin/sql/qa/src/main/resources/alias.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/alias.csv-spec index 4134db187c9a6..e18a5ab7efcce 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/alias.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/alias.csv-spec @@ -82,20 +82,20 @@ salary |INTEGER |integer showAlias SHOW TABLES LIKE 'test\_alias' ESCAPE '\'; -name:s | type:s +name:s | type:s | kind:s -test_alias | VIEW +test_alias | VIEW | ALIAS ; showPattern SHOW TABLES LIKE 'test_%'; -name:s | type:s +name:s | type:s | kind :s -test_alias | VIEW -test_alias_emp | VIEW -test_emp | BASE TABLE -test_emp_copy | BASE TABLE +test_alias | VIEW | ALIAS +test_alias_emp | VIEW | ALIAS +test_emp | BASE TABLE | INDEX +test_emp_copy | BASE TABLE | INDEX ; groupByOnAlias diff --git a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec index f60f686cb8a3d..9f63de97c9928 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec @@ -189,49 +189,49 @@ 
TODAY |SCALAR showTables SHOW TABLES; - name | type -logs |BASE TABLE -test_alias |VIEW -test_alias_emp |VIEW -test_emp |BASE TABLE -test_emp_copy |BASE TABLE + name | type | kind +logs |BASE TABLE |INDEX +test_alias |VIEW |ALIAS +test_alias_emp |VIEW |ALIAS +test_emp |BASE TABLE |INDEX +test_emp_copy |BASE TABLE |INDEX ; showTablesSimpleLike SHOW TABLES LIKE 'test_emp'; - name:s | type:s -test_emp |BASE TABLE + name:s | type:s | kind:s +test_emp |BASE TABLE |INDEX ; showTablesMultiLike SHOW TABLES LIKE 'test_emp%'; - name:s | type:s -test_emp |BASE TABLE -test_emp_copy |BASE TABLE + name:s | type:s |kind:s +test_emp |BASE TABLE |INDEX +test_emp_copy |BASE TABLE |INDEX ; showTablesIdentifier SHOW TABLES "test_emp"; - name:s | type:s -test_emp |BASE TABLE + name:s | type:s |kind:s +test_emp |BASE TABLE |INDEX ; showTablesIdentifierPattern SHOW TABLES "test_e*,-test_emp"; - name:s | type:s -test_emp_copy |BASE TABLE + name:s | type:s |kind:s +test_emp_copy |BASE TABLE |INDEX ; showTablesIdentifierPatternOnAliases SHOW TABLES "test*,-test_emp*"; - name:s | type:s -test_alias |VIEW -test_alias_emp |VIEW + name:s | type:s | kind:s +test_alias |VIEW |ALIAS +test_alias_emp |VIEW |ALIAS ; // DESCRIBE diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index c181023db03eb..2fe719e8cb485 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -93,11 +93,11 @@ showTables // tag::showTables SHOW TABLES; - name | type ----------------+--------------- -emp |BASE TABLE -employees |VIEW -library |BASE TABLE + name | type | kind +---------------+---------------+--------------- +emp |BASE TABLE |INDEX +employees |VIEW |ALIAS +library |BASE TABLE |INDEX // end::showTables ; @@ -106,9 +106,9 @@ showTablesLikeExact // tag::showTablesLikeExact SHOW TABLES LIKE 'emp'; - name | type ----------------+--------------- -emp |BASE TABLE + name | type | kind +---------------+---------------+--------------- +emp |BASE TABLE |INDEX // end::showTablesLikeExact ; @@ -117,10 +117,10 @@ showTablesLikeWildcard // tag::showTablesLikeWildcard SHOW TABLES LIKE 'emp%'; - name | type ----------------+--------------- -emp |BASE TABLE -employees |VIEW + name | type | kind +---------------+---------------+--------------- +emp |BASE TABLE |INDEX +employees |VIEW |ALIAS // end::showTablesLikeWildcard ; @@ -130,9 +130,9 @@ showTablesLikeOneChar // tag::showTablesLikeOneChar SHOW TABLES LIKE 'em_'; - name | type ----------------+--------------- -emp |BASE TABLE + name | type | kind +---------------+---------------+--------------- +emp |BASE TABLE |INDEX // end::showTablesLikeOneChar ; @@ -141,20 +141,20 @@ showTablesLikeMixed // tag::showTablesLikeMixed SHOW TABLES LIKE '%em_'; - name | type ----------------+--------------- -emp |BASE TABLE + name | type | kind +---------------+---------------+--------------- +emp |BASE TABLE |INDEX // end::showTablesLikeMixed ; showTablesLikeEscape -schema::name:s|type:s +schema::name:s|type:s|kind:s // tag::showTablesLikeEscape SHOW TABLES LIKE 'emp!%' ESCAPE '!'; - name | type ----------------+--------------- + name | type | kind +---------------+---------------+--------------- // end::showTablesLikeEscape ; @@ -164,10 +164,10 @@ showTablesEsMultiIndex // tag::showTablesEsMultiIndex SHOW TABLES "*,-l*"; - name | type ----------------+--------------- -emp |BASE TABLE -employees |VIEW + name | type | kind 
+---------------+---------------+--------------- emp |BASE TABLE |INDEX employees |VIEW |ALIAS // end::showTablesEsMultiIndex ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_show_tables.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_show_tables.sql index b65be73066e41..53286eadc5261 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_show_tables.sql +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_show_tables.sql @@ -1,4 +1,5 @@ CREATE TABLE mock ( "name" VARCHAR, - "type" VARCHAR + "type" VARCHAR, + "kind" VARCHAR ); diff --git a/x-pack/plugin/sql/qa/src/main/resources/slow/frozen.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/slow/frozen.csv-spec new file mode 100644 index 0000000000000..6175bea6034cd --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/slow/frozen.csv-spec @@ -0,0 +1,59 @@ +// +// Frozen indices tests +// + +showTables +SHOW TABLES INCLUDE FROZEN; + + name | type | kind +frozen_emp |BASE TABLE |FROZEN INDEX +logs |BASE TABLE |INDEX +test_alias |VIEW |ALIAS +test_alias_emp |VIEW |ALIAS +test_emp |BASE TABLE |INDEX +test_emp_copy |BASE TABLE |INDEX +; + +columnFromFrozen +SELECT gender FROM FROZEN frozen_emp ORDER BY gender LIMIT 5; + +gender:s +F +F +F +F +F +; + +percentileFrozen +SELECT gender, PERCENTILE(emp_no, 92.45) p1 FROM FROZEN frozen_emp GROUP BY gender; + +gender:s | p1:d +null |10018.745 +F |10098.0085 +M |10091.393 +; + +countFromFrozen +SELECT gender, COUNT(*) AS c FROM FROZEN frozen_emp GROUP BY gender; + +gender:s | c:l +null |10 +F |33 +M |57 +; + +sum +SELECT SUM(salary) FROM FROZEN frozen_emp; + + SUM(salary) +--------------- +4824855 +; + +kurtosisAndSkewnessNoGroup +SELECT KURTOSIS(emp_no) k, SKEWNESS(salary) s FROM FROZEN frozen_emp; + +k:d | s:d +1.7997599759975997 | 0.2707722118423227 +; \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/slow/readme.txt b/x-pack/plugin/sql/qa/src/main/resources/slow/readme.txt new file mode 100644 index 0000000000000..a95c75b7665ba --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/slow/readme.txt @@ -0,0 +1,3 @@ +Slow tests are placed in this folder so that they don't get picked up accidentally through classpath discovery. +A good example is the frozen tests which, by their nature, take a LOT more time to execute and thus would cause +the other 'normal' tests to time-out.
\ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java index 77f4a4994ed86..06abde5cef4df 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java @@ -33,12 +33,14 @@ public class SqlQueryRequest extends AbstractSqlQueryRequest { private static final ObjectParser PARSER = objectParser(SqlQueryRequest::new); static final ParseField COLUMNAR = new ParseField("columnar"); static final ParseField FIELD_MULTI_VALUE_LENIENCY = new ParseField("field_multi_value_leniency"); + static final ParseField INDEX_INCLUDE_FROZEN = new ParseField("index_include_frozen"); static { PARSER.declareString(SqlQueryRequest::cursor, CURSOR); PARSER.declareBoolean(SqlQueryRequest::columnar, COLUMNAR); PARSER.declareBoolean(SqlQueryRequest::fieldMultiValueLeniency, FIELD_MULTI_VALUE_LENIENCY); + PARSER.declareBoolean(SqlQueryRequest::indexIncludeFrozen, INDEX_INCLUDE_FROZEN); } private String cursor = ""; @@ -48,6 +50,7 @@ public class SqlQueryRequest extends AbstractSqlQueryRequest { */ private Boolean columnar = Boolean.FALSE; private boolean fieldMultiValueLeniency = Protocol.FIELD_MULTI_VALUE_LENIENCY; + private boolean indexIncludeFrozen = Protocol.INDEX_INCLUDE_FROZEN; public SqlQueryRequest() { super(); @@ -55,11 +58,12 @@ public SqlQueryRequest() { public SqlQueryRequest(String query, List params, QueryBuilder filter, ZoneId zoneId, int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, Boolean columnar, - String cursor, RequestInfo requestInfo, boolean fieldMultiValueLeniency) { + String cursor, RequestInfo requestInfo, boolean fieldMultiValueLeniency, boolean indexIncludeFrozen) { super(query, params, filter, zoneId, fetchSize, requestTimeout, pageTimeout, requestInfo); this.cursor = cursor; this.columnar = columnar; this.fieldMultiValueLeniency = fieldMultiValueLeniency; + this.indexIncludeFrozen = indexIncludeFrozen; } @Override @@ -115,11 +119,21 @@ public boolean fieldMultiValueLeniency() { return fieldMultiValueLeniency; } + public SqlQueryRequest indexIncludeFrozen(boolean include) { + this.indexIncludeFrozen = include; + return this; + } + + public boolean indexIncludeFrozen() { + return indexIncludeFrozen; + } + public SqlQueryRequest(StreamInput in) throws IOException { super(in); cursor = in.readString(); columnar = in.readOptionalBoolean(); fieldMultiValueLeniency = in.readBoolean(); + indexIncludeFrozen = in.readBoolean(); } @Override @@ -128,11 +142,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(cursor); out.writeOptionalBoolean(columnar); out.writeBoolean(fieldMultiValueLeniency); + out.writeBoolean(indexIncludeFrozen); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), cursor, columnar); + return Objects.hash(super.hashCode(), cursor, columnar, fieldMultiValueLeniency, indexIncludeFrozen); } @Override @@ -140,7 +155,8 @@ public boolean equals(Object obj) { return super.equals(obj) && Objects.equals(cursor, ((SqlQueryRequest) obj).cursor) && Objects.equals(columnar, ((SqlQueryRequest) obj).columnar) - && fieldMultiValueLeniency == ((SqlQueryRequest) obj).fieldMultiValueLeniency; + && fieldMultiValueLeniency == ((SqlQueryRequest) obj).fieldMultiValueLeniency + && indexIncludeFrozen == 
((SqlQueryRequest) obj).indexIncludeFrozen; } @Override @@ -152,7 +168,7 @@ public String getDescription() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // This is needed just to test round-trip compatibility with proto.SqlQueryRequest return new org.elasticsearch.xpack.sql.proto.SqlQueryRequest(query(), params(), zoneId(), fetchSize(), requestTimeout(), - pageTimeout(), filter(), columnar(), cursor(), requestInfo(), fieldMultiValueLeniency()) + pageTimeout(), filter(), columnar(), cursor(), requestInfo(), fieldMultiValueLeniency(), indexIncludeFrozen()) .toXContent(builder, params); } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java index 71f2774def97e..f2ede4b463cdf 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java @@ -25,15 +25,16 @@ public class SqlQueryRequestBuilder extends ActionRequestBuilder params, QueryBuilder filter, ZoneId zoneId, int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, boolean columnar, String nextPageInfo, RequestInfo requestInfo, - boolean multiValueFieldLeniency) { + boolean multiValueFieldLeniency, boolean indexIncludeFrozen) { super(client, action, new SqlQueryRequest(query, params, filter, zoneId, fetchSize, requestTimeout, pageTimeout, columnar, - nextPageInfo, requestInfo, multiValueFieldLeniency)); + nextPageInfo, requestInfo, multiValueFieldLeniency, indexIncludeFrozen)); } public SqlQueryRequestBuilder query(String query) { diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java index 009d0823d7c8a..0679283411667 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java @@ -69,7 +69,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws null, null, requestInfo(), + false, false).toXContent(builder, params); - } } diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java index ce33f9a42e90d..992f55e5a3b00 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java @@ -56,7 +56,7 @@ protected SqlQueryRequest createTestInstance() { return new SqlQueryRequest(randomAlphaOfLength(10), randomParameters(), SqlTestUtils.randomFilterOrNull(random()), randomZone(), between(1, Integer.MAX_VALUE), randomTV(), randomTV(), randomBoolean(), randomAlphaOfLength(10), requestInfo, - randomBoolean() + randomBoolean(), randomBoolean() ); } @@ -115,7 +115,7 @@ protected SqlQueryRequest mutateInstance(SqlQueryRequest instance) { ); SqlQueryRequest newRequest = new SqlQueryRequest(instance.query(), instance.params(), instance.filter(), instance.zoneId(), instance.fetchSize(), 
instance.requestTimeout(), instance.pageTimeout(), instance.columnar(), - instance.cursor(), instance.requestInfo(), instance.fieldMultiValueLeniency()); + instance.cursor(), instance.requestInfo(), instance.fieldMultiValueLeniency(), instance.indexIncludeFrozen()); mutator.accept(newRequest); return newRequest; } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index 24dfcf76ef11e..f93a3042da337 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -64,13 +64,14 @@ public SqlQueryResponse basicQuery(String query, int fetchSize) throws SQLExcept // TODO allow customizing the time zone - this is what session set/reset/get should be about // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(query, Collections.emptyList(), Protocol.TIME_ZONE, - fetchSize, - TimeValue.timeValueMillis(cfg.queryTimeout()), + fetchSize, + TimeValue.timeValueMillis(cfg.queryTimeout()), TimeValue.timeValueMillis(cfg.pageTimeout()), null, Boolean.FALSE, null, new RequestInfo(Mode.CLI), + false, false); return query(sqlRequest); } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java index 13471afe2212f..29238bf4064a5 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java @@ -23,6 +23,7 @@ public final class Protocol { public static final TimeValue REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); public static final TimeValue PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); public static final boolean FIELD_MULTI_VALUE_LENIENCY = false; + public static final boolean INDEX_INCLUDE_FROZEN = false; /** * SQL-related endpoints diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java index ec027a55a9365..15aa9566a489f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java @@ -33,12 +33,11 @@ public class SqlQueryRequest extends AbstractSqlRequest { private final Boolean columnar; private final List params; private final boolean fieldMultiValueLeniency; - + private final boolean indexIncludeFrozen; public SqlQueryRequest(String query, List params, ZoneId zoneId, int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, ToXContent filter, Boolean columnar, - String cursor, RequestInfo requestInfo, - boolean fieldMultiValueLeniency) { + String cursor, RequestInfo requestInfo, boolean fieldMultiValueLeniency, boolean indexIncludeFrozen) { super(requestInfo); this.query = query; this.params = params; @@ -50,11 +49,12 @@ public SqlQueryRequest(String query, List params, ZoneId zon this.columnar = columnar; this.cursor = cursor; this.fieldMultiValueLeniency = fieldMultiValueLeniency; + this.indexIncludeFrozen = indexIncludeFrozen; } public SqlQueryRequest(String cursor, TimeValue requestTimeout, TimeValue 
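// Taken together, the ParseField declaration, the Protocol.INDEX_INCLUDE_FROZEN default
// of false, and the proto toXContent() below (which emits the field only when true)
// mean the REST layer gains one optional boolean body parameter. A request that opts in
// would plausibly look like this (illustrative body; the query and index are made up):
//
//   POST /_sql?format=txt
//   {
//     "query": "SELECT count(*) FROM archive",
//     "index_include_frozen": true
//   }
//
// Because the field is serialized only when set, clients that never enable it produce
// the same request body before and after this change.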
pageTimeout, RequestInfo requestInfo) { this("", Collections.emptyList(), Protocol.TIME_ZONE, Protocol.FETCH_SIZE, requestTimeout, pageTimeout, - null, false, cursor, requestInfo, Protocol.FIELD_MULTI_VALUE_LENIENCY); + null, false, cursor, requestInfo, Protocol.FIELD_MULTI_VALUE_LENIENCY, Protocol.INDEX_INCLUDE_FROZEN); } /** @@ -127,6 +127,10 @@ public boolean fieldMultiValueLeniency() { return fieldMultiValueLeniency; } + public boolean indexIncludeFrozen() { + return indexIncludeFrozen; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -148,13 +152,14 @@ public boolean equals(Object o) { Objects.equals(filter, that.filter) && Objects.equals(columnar, that.columnar) && Objects.equals(cursor, that.cursor) && - fieldMultiValueLeniency == that.fieldMultiValueLeniency; + fieldMultiValueLeniency == that.fieldMultiValueLeniency && + indexIncludeFrozen == that.indexIncludeFrozen; } @Override public int hashCode() { return Objects.hash(super.hashCode(), query, zoneId, fetchSize, requestTimeout, pageTimeout, - filter, columnar, cursor, fieldMultiValueLeniency); + filter, columnar, cursor, fieldMultiValueLeniency, indexIncludeFrozen); } @Override @@ -195,6 +200,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (fieldMultiValueLeniency) { builder.field("field_multi_value_leniency", fieldMultiValueLeniency); } + if (indexIncludeFrozen) { + builder.field("index_include_frozen", indexIncludeFrozen); + } if (cursor != null) { builder.field("cursor", cursor); } diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 index f60610fc75119..76af159be902f 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -50,9 +50,9 @@ statement )* ')')? statement #debug - | SHOW TABLES (tableLike=likePattern | tableIdent=tableIdentifier)? #showTables - | SHOW COLUMNS (FROM | IN) (tableLike=likePattern | tableIdent=tableIdentifier) #showColumns - | (DESCRIBE | DESC) (tableLike=likePattern | tableIdent=tableIdentifier) #showColumns + | SHOW TABLES (INCLUDE FROZEN)? (tableLike=likePattern | tableIdent=tableIdentifier)? #showTables + | SHOW COLUMNS (INCLUDE FROZEN)? (FROM | IN) (tableLike=likePattern | tableIdent=tableIdentifier) #showColumns + | (DESCRIBE | DESC) (INCLUDE FROZEN)? (tableLike=likePattern | tableIdent=tableIdentifier) #showColumns | SHOW FUNCTIONS (likePattern)? #showFunctions | SHOW SCHEMAS #showSchemas | SYS TABLES (CATALOG clusterLike=likePattern)? @@ -149,7 +149,7 @@ joinCriteria ; relationPrimary - : tableIdentifier (AS? qualifiedName)? #tableName + : FROZEN? tableIdentifier (AS? qualifiedName)? #tableName | '(' queryNoWith ')' (AS? qualifiedName)? #aliasedQuery | '(' relation ')' (AS? qualifiedName)? 
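// With the SqlBase.g4 changes above, the parser accepts three new statement shapes
// (the statements below are illustrative examples, not taken from the patch):
//
//   SHOW TABLES INCLUDE FROZEN;            -- list frozen indices alongside regular ones
//   DESCRIBE INCLUDE FROZEN my_index;      -- describe columns of a possibly-frozen index
//   SELECT * FROM FROZEN my_index;         -- opt a single table into frozen semantics
//
// FROZEN? in relationPrimary keeps the per-table keyword optional, and INCLUDE FROZEN on
// SHOW TABLES / SHOW COLUMNS / DESCRIBE is likewise optional, so existing statements
// parse exactly as before.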
#aliasedRelation ; @@ -399,6 +399,7 @@ FALSE: 'FALSE'; FIRST: 'FIRST'; FORMAT: 'FORMAT'; FROM: 'FROM'; +FROZEN: 'FROZEN'; FULL: 'FULL'; FUNCTIONS: 'FUNCTIONS'; GRAPHVIZ: 'GRAPHVIZ'; @@ -407,6 +408,7 @@ HAVING: 'HAVING'; HOUR: 'HOUR'; HOURS: 'HOURS'; IN: 'IN'; +INCLUDE: 'INCLUDE'; INNER: 'INNER'; INTERVAL: 'INTERVAL'; IS: 'IS'; diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens index 0b24423bbee54..7eeec75f9c928 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens @@ -37,101 +37,103 @@ FALSE=36 FIRST=37 FORMAT=38 FROM=39 -FULL=40 -FUNCTIONS=41 -GRAPHVIZ=42 -GROUP=43 -HAVING=44 -HOUR=45 -HOURS=46 -IN=47 -INNER=48 -INTERVAL=49 -IS=50 -JOIN=51 -LAST=52 -LEFT=53 -LIKE=54 -LIMIT=55 -MAPPED=56 -MATCH=57 -MINUTE=58 -MINUTES=59 -MONTH=60 -MONTHS=61 -NATURAL=62 -NOT=63 -NULL=64 -NULLS=65 -ON=66 -OPTIMIZED=67 -OR=68 -ORDER=69 -OUTER=70 -PARSED=71 -PHYSICAL=72 -PLAN=73 -RIGHT=74 -RLIKE=75 -QUERY=76 -SCHEMAS=77 -SECOND=78 -SECONDS=79 -SELECT=80 -SHOW=81 -SYS=82 -TABLE=83 -TABLES=84 -TEXT=85 -THEN=86 -TRUE=87 -TO=88 -TYPE=89 -TYPES=90 -USING=91 -VERIFY=92 -WHEN=93 -WHERE=94 -WITH=95 -YEAR=96 -YEARS=97 -ESCAPE_ESC=98 -FUNCTION_ESC=99 -LIMIT_ESC=100 -DATE_ESC=101 -TIME_ESC=102 -TIMESTAMP_ESC=103 -GUID_ESC=104 -ESC_END=105 -EQ=106 -NULLEQ=107 -NEQ=108 -LT=109 -LTE=110 -GT=111 -GTE=112 -PLUS=113 -MINUS=114 -ASTERISK=115 -SLASH=116 -PERCENT=117 -CAST_OP=118 -CONCAT=119 -DOT=120 -PARAM=121 -STRING=122 -INTEGER_VALUE=123 -DECIMAL_VALUE=124 -IDENTIFIER=125 -DIGIT_IDENTIFIER=126 -TABLE_IDENTIFIER=127 -QUOTED_IDENTIFIER=128 -BACKQUOTED_IDENTIFIER=129 -SIMPLE_COMMENT=130 -BRACKETED_COMMENT=131 -WS=132 -UNRECOGNIZED=133 -DELIMITER=134 +FROZEN=40 +FULL=41 +FUNCTIONS=42 +GRAPHVIZ=43 +GROUP=44 +HAVING=45 +HOUR=46 +HOURS=47 +IN=48 +INCLUDE=49 +INNER=50 +INTERVAL=51 +IS=52 +JOIN=53 +LAST=54 +LEFT=55 +LIKE=56 +LIMIT=57 +MAPPED=58 +MATCH=59 +MINUTE=60 +MINUTES=61 +MONTH=62 +MONTHS=63 +NATURAL=64 +NOT=65 +NULL=66 +NULLS=67 +ON=68 +OPTIMIZED=69 +OR=70 +ORDER=71 +OUTER=72 +PARSED=73 +PHYSICAL=74 +PLAN=75 +RIGHT=76 +RLIKE=77 +QUERY=78 +SCHEMAS=79 +SECOND=80 +SECONDS=81 +SELECT=82 +SHOW=83 +SYS=84 +TABLE=85 +TABLES=86 +TEXT=87 +THEN=88 +TRUE=89 +TO=90 +TYPE=91 +TYPES=92 +USING=93 +VERIFY=94 +WHEN=95 +WHERE=96 +WITH=97 +YEAR=98 +YEARS=99 +ESCAPE_ESC=100 +FUNCTION_ESC=101 +LIMIT_ESC=102 +DATE_ESC=103 +TIME_ESC=104 +TIMESTAMP_ESC=105 +GUID_ESC=106 +ESC_END=107 +EQ=108 +NULLEQ=109 +NEQ=110 +LT=111 +LTE=112 +GT=113 +GTE=114 +PLUS=115 +MINUS=116 +ASTERISK=117 +SLASH=118 +PERCENT=119 +CAST_OP=120 +CONCAT=121 +DOT=122 +PARAM=123 +STRING=124 +INTEGER_VALUE=125 +DECIMAL_VALUE=126 +IDENTIFIER=127 +DIGIT_IDENTIFIER=128 +TABLE_IDENTIFIER=129 +QUOTED_IDENTIFIER=130 +BACKQUOTED_IDENTIFIER=131 +SIMPLE_COMMENT=132 +BRACKETED_COMMENT=133 +WS=134 +UNRECOGNIZED=135 +DELIMITER=136 '('=1 ')'=2 ','=3 @@ -171,84 +173,86 @@ DELIMITER=134 'FIRST'=37 'FORMAT'=38 'FROM'=39 -'FULL'=40 -'FUNCTIONS'=41 -'GRAPHVIZ'=42 -'GROUP'=43 -'HAVING'=44 -'HOUR'=45 -'HOURS'=46 -'IN'=47 -'INNER'=48 -'INTERVAL'=49 -'IS'=50 -'JOIN'=51 -'LAST'=52 -'LEFT'=53 -'LIKE'=54 -'LIMIT'=55 -'MAPPED'=56 -'MATCH'=57 -'MINUTE'=58 -'MINUTES'=59 -'MONTH'=60 -'MONTHS'=61 -'NATURAL'=62 -'NOT'=63 -'NULL'=64 -'NULLS'=65 -'ON'=66 -'OPTIMIZED'=67 -'OR'=68 -'ORDER'=69 -'OUTER'=70 -'PARSED'=71 -'PHYSICAL'=72 -'PLAN'=73 -'RIGHT'=74 -'RLIKE'=75 -'QUERY'=76 -'SCHEMAS'=77 -'SECOND'=78 -'SECONDS'=79 -'SELECT'=80 -'SHOW'=81 -'SYS'=82 -'TABLE'=83 -'TABLES'=84 -'TEXT'=85 -'THEN'=86 
-'TRUE'=87 -'TO'=88 -'TYPE'=89 -'TYPES'=90 -'USING'=91 -'VERIFY'=92 -'WHEN'=93 -'WHERE'=94 -'WITH'=95 -'YEAR'=96 -'YEARS'=97 -'{ESCAPE'=98 -'{FN'=99 -'{LIMIT'=100 -'{D'=101 -'{T'=102 -'{TS'=103 -'{GUID'=104 -'}'=105 -'='=106 -'<=>'=107 -'<'=109 -'<='=110 -'>'=111 -'>='=112 -'+'=113 -'-'=114 -'*'=115 -'/'=116 -'%'=117 -'::'=118 -'||'=119 -'.'=120 -'?'=121 +'FROZEN'=40 +'FULL'=41 +'FUNCTIONS'=42 +'GRAPHVIZ'=43 +'GROUP'=44 +'HAVING'=45 +'HOUR'=46 +'HOURS'=47 +'IN'=48 +'INCLUDE'=49 +'INNER'=50 +'INTERVAL'=51 +'IS'=52 +'JOIN'=53 +'LAST'=54 +'LEFT'=55 +'LIKE'=56 +'LIMIT'=57 +'MAPPED'=58 +'MATCH'=59 +'MINUTE'=60 +'MINUTES'=61 +'MONTH'=62 +'MONTHS'=63 +'NATURAL'=64 +'NOT'=65 +'NULL'=66 +'NULLS'=67 +'ON'=68 +'OPTIMIZED'=69 +'OR'=70 +'ORDER'=71 +'OUTER'=72 +'PARSED'=73 +'PHYSICAL'=74 +'PLAN'=75 +'RIGHT'=76 +'RLIKE'=77 +'QUERY'=78 +'SCHEMAS'=79 +'SECOND'=80 +'SECONDS'=81 +'SELECT'=82 +'SHOW'=83 +'SYS'=84 +'TABLE'=85 +'TABLES'=86 +'TEXT'=87 +'THEN'=88 +'TRUE'=89 +'TO'=90 +'TYPE'=91 +'TYPES'=92 +'USING'=93 +'VERIFY'=94 +'WHEN'=95 +'WHERE'=96 +'WITH'=97 +'YEAR'=98 +'YEARS'=99 +'{ESCAPE'=100 +'{FN'=101 +'{LIMIT'=102 +'{D'=103 +'{T'=104 +'{TS'=105 +'{GUID'=106 +'}'=107 +'='=108 +'<=>'=109 +'<'=111 +'<='=112 +'>'=113 +'>='=114 +'+'=115 +'-'=116 +'*'=117 +'/'=118 +'%'=119 +'::'=120 +'||'=121 +'.'=122 +'?'=123 diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens index 21925952a2e34..603e67fec88c1 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens +++ b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens @@ -37,100 +37,102 @@ FALSE=36 FIRST=37 FORMAT=38 FROM=39 -FULL=40 -FUNCTIONS=41 -GRAPHVIZ=42 -GROUP=43 -HAVING=44 -HOUR=45 -HOURS=46 -IN=47 -INNER=48 -INTERVAL=49 -IS=50 -JOIN=51 -LAST=52 -LEFT=53 -LIKE=54 -LIMIT=55 -MAPPED=56 -MATCH=57 -MINUTE=58 -MINUTES=59 -MONTH=60 -MONTHS=61 -NATURAL=62 -NOT=63 -NULL=64 -NULLS=65 -ON=66 -OPTIMIZED=67 -OR=68 -ORDER=69 -OUTER=70 -PARSED=71 -PHYSICAL=72 -PLAN=73 -RIGHT=74 -RLIKE=75 -QUERY=76 -SCHEMAS=77 -SECOND=78 -SECONDS=79 -SELECT=80 -SHOW=81 -SYS=82 -TABLE=83 -TABLES=84 -TEXT=85 -THEN=86 -TRUE=87 -TO=88 -TYPE=89 -TYPES=90 -USING=91 -VERIFY=92 -WHEN=93 -WHERE=94 -WITH=95 -YEAR=96 -YEARS=97 -ESCAPE_ESC=98 -FUNCTION_ESC=99 -LIMIT_ESC=100 -DATE_ESC=101 -TIME_ESC=102 -TIMESTAMP_ESC=103 -GUID_ESC=104 -ESC_END=105 -EQ=106 -NULLEQ=107 -NEQ=108 -LT=109 -LTE=110 -GT=111 -GTE=112 -PLUS=113 -MINUS=114 -ASTERISK=115 -SLASH=116 -PERCENT=117 -CAST_OP=118 -CONCAT=119 -DOT=120 -PARAM=121 -STRING=122 -INTEGER_VALUE=123 -DECIMAL_VALUE=124 -IDENTIFIER=125 -DIGIT_IDENTIFIER=126 -TABLE_IDENTIFIER=127 -QUOTED_IDENTIFIER=128 -BACKQUOTED_IDENTIFIER=129 -SIMPLE_COMMENT=130 -BRACKETED_COMMENT=131 -WS=132 -UNRECOGNIZED=133 +FROZEN=40 +FULL=41 +FUNCTIONS=42 +GRAPHVIZ=43 +GROUP=44 +HAVING=45 +HOUR=46 +HOURS=47 +IN=48 +INCLUDE=49 +INNER=50 +INTERVAL=51 +IS=52 +JOIN=53 +LAST=54 +LEFT=55 +LIKE=56 +LIMIT=57 +MAPPED=58 +MATCH=59 +MINUTE=60 +MINUTES=61 +MONTH=62 +MONTHS=63 +NATURAL=64 +NOT=65 +NULL=66 +NULLS=67 +ON=68 +OPTIMIZED=69 +OR=70 +ORDER=71 +OUTER=72 +PARSED=73 +PHYSICAL=74 +PLAN=75 +RIGHT=76 +RLIKE=77 +QUERY=78 +SCHEMAS=79 +SECOND=80 +SECONDS=81 +SELECT=82 +SHOW=83 +SYS=84 +TABLE=85 +TABLES=86 +TEXT=87 +THEN=88 +TRUE=89 +TO=90 +TYPE=91 +TYPES=92 +USING=93 +VERIFY=94 +WHEN=95 +WHERE=96 +WITH=97 +YEAR=98 +YEARS=99 +ESCAPE_ESC=100 +FUNCTION_ESC=101 +LIMIT_ESC=102 +DATE_ESC=103 +TIME_ESC=104 +TIMESTAMP_ESC=105 +GUID_ESC=106 +ESC_END=107 +EQ=108 +NULLEQ=109 +NEQ=110 +LT=111 +LTE=112 +GT=113 +GTE=114 +PLUS=115 
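// A note on the wholesale renumbering above and below: ANTLR assigns token types
// positionally, so inserting FROZEN (40) and INCLUDE (49) into the keyword range shifts
// every later token id by one or two. That is why SqlBase.tokens, SqlBaseLexer.tokens
// and the serialized ATN further down change en masse; these files are generated
// artifacts and are regenerated, never edited by hand.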
+MINUS=116 +ASTERISK=117 +SLASH=118 +PERCENT=119 +CAST_OP=120 +CONCAT=121 +DOT=122 +PARAM=123 +STRING=124 +INTEGER_VALUE=125 +DECIMAL_VALUE=126 +IDENTIFIER=127 +DIGIT_IDENTIFIER=128 +TABLE_IDENTIFIER=129 +QUOTED_IDENTIFIER=130 +BACKQUOTED_IDENTIFIER=131 +SIMPLE_COMMENT=132 +BRACKETED_COMMENT=133 +WS=134 +UNRECOGNIZED=135 '('=1 ')'=2 ','=3 @@ -170,84 +172,86 @@ UNRECOGNIZED=133 'FIRST'=37 'FORMAT'=38 'FROM'=39 -'FULL'=40 -'FUNCTIONS'=41 -'GRAPHVIZ'=42 -'GROUP'=43 -'HAVING'=44 -'HOUR'=45 -'HOURS'=46 -'IN'=47 -'INNER'=48 -'INTERVAL'=49 -'IS'=50 -'JOIN'=51 -'LAST'=52 -'LEFT'=53 -'LIKE'=54 -'LIMIT'=55 -'MAPPED'=56 -'MATCH'=57 -'MINUTE'=58 -'MINUTES'=59 -'MONTH'=60 -'MONTHS'=61 -'NATURAL'=62 -'NOT'=63 -'NULL'=64 -'NULLS'=65 -'ON'=66 -'OPTIMIZED'=67 -'OR'=68 -'ORDER'=69 -'OUTER'=70 -'PARSED'=71 -'PHYSICAL'=72 -'PLAN'=73 -'RIGHT'=74 -'RLIKE'=75 -'QUERY'=76 -'SCHEMAS'=77 -'SECOND'=78 -'SECONDS'=79 -'SELECT'=80 -'SHOW'=81 -'SYS'=82 -'TABLE'=83 -'TABLES'=84 -'TEXT'=85 -'THEN'=86 -'TRUE'=87 -'TO'=88 -'TYPE'=89 -'TYPES'=90 -'USING'=91 -'VERIFY'=92 -'WHEN'=93 -'WHERE'=94 -'WITH'=95 -'YEAR'=96 -'YEARS'=97 -'{ESCAPE'=98 -'{FN'=99 -'{LIMIT'=100 -'{D'=101 -'{T'=102 -'{TS'=103 -'{GUID'=104 -'}'=105 -'='=106 -'<=>'=107 -'<'=109 -'<='=110 -'>'=111 -'>='=112 -'+'=113 -'-'=114 -'*'=115 -'/'=116 -'%'=117 -'::'=118 -'||'=119 -'.'=120 -'?'=121 +'FROZEN'=40 +'FULL'=41 +'FUNCTIONS'=42 +'GRAPHVIZ'=43 +'GROUP'=44 +'HAVING'=45 +'HOUR'=46 +'HOURS'=47 +'IN'=48 +'INCLUDE'=49 +'INNER'=50 +'INTERVAL'=51 +'IS'=52 +'JOIN'=53 +'LAST'=54 +'LEFT'=55 +'LIKE'=56 +'LIMIT'=57 +'MAPPED'=58 +'MATCH'=59 +'MINUTE'=60 +'MINUTES'=61 +'MONTH'=62 +'MONTHS'=63 +'NATURAL'=64 +'NOT'=65 +'NULL'=66 +'NULLS'=67 +'ON'=68 +'OPTIMIZED'=69 +'OR'=70 +'ORDER'=71 +'OUTER'=72 +'PARSED'=73 +'PHYSICAL'=74 +'PLAN'=75 +'RIGHT'=76 +'RLIKE'=77 +'QUERY'=78 +'SCHEMAS'=79 +'SECOND'=80 +'SECONDS'=81 +'SELECT'=82 +'SHOW'=83 +'SYS'=84 +'TABLE'=85 +'TABLES'=86 +'TEXT'=87 +'THEN'=88 +'TRUE'=89 +'TO'=90 +'TYPE'=91 +'TYPES'=92 +'USING'=93 +'VERIFY'=94 +'WHEN'=95 +'WHERE'=96 +'WITH'=97 +'YEAR'=98 +'YEARS'=99 +'{ESCAPE'=100 +'{FN'=101 +'{LIMIT'=102 +'{D'=103 +'{T'=104 +'{TS'=105 +'{GUID'=106 +'}'=107 +'='=108 +'<=>'=109 +'<'=111 +'<='=112 +'>'=113 +'>='=114 +'+'=115 +'-'=116 +'*'=117 +'/'=118 +'%'=119 +'::'=120 +'||'=121 +'.'=122 +'?'=123 diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 65a9410941b17..40a34dcf006b0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -288,11 +288,11 @@ private class ResolveTable extends AnalyzeRule { protected LogicalPlan rule(UnresolvedRelation plan) { TableIdentifier table = plan.table(); if (indexResolution.isValid() == false) { - return plan.unresolvedMessage().equals(indexResolution.toString()) ? plan : new UnresolvedRelation(plan.source(), - plan.table(), plan.alias(), indexResolution.toString()); + return plan.unresolvedMessage().equals(indexResolution.toString()) ? 
plan : + new UnresolvedRelation(plan.source(), plan.table(), plan.alias(), plan.frozen(), indexResolution.toString()); } assert indexResolution.matches(table.index()); - LogicalPlan logicalPlan = new EsRelation(plan.source(), indexResolution.get()); + LogicalPlan logicalPlan = new EsRelation(plan.source(), indexResolution.get(), plan.frozen()); SubQueryAlias sa = new SubQueryAlias(plan.source(), logicalPlan, table.index()); if (plan.alias() != null) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java index 68b73cf3a019f..16e35cd8638a2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.analysis.analyzer; -import org.elasticsearch.xpack.sql.plan.TableIdentifier; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; @@ -23,9 +22,9 @@ public class PreAnalyzer { public static class PreAnalysis { public static final PreAnalysis EMPTY = new PreAnalysis(emptyList()); - public final List indices; + public final List indices; - PreAnalysis(List indices) { + PreAnalysis(List indices) { this.indices = indices; } } @@ -39,9 +38,9 @@ public PreAnalysis preAnalyze(LogicalPlan plan) { } private PreAnalysis doPreAnalyze(LogicalPlan plan) { - List indices = new ArrayList<>(); + List indices = new ArrayList<>(); - plan.forEachUp(p -> indices.add(p.table()), UnresolvedRelation.class); + plan.forEachUp(p -> indices.add(new TableInfo(p.table(), p.frozen())), UnresolvedRelation.class); // mark plan as preAnalyzed (if it were marked, there would be no analysis) plan.forEachUp(LogicalPlan::setPreAnalyzed); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/TableInfo.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/TableInfo.java new file mode 100644 index 0000000000000..479b094fad5bc --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/TableInfo.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
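// PreAnalyzer above now collects a TableInfo (identifier plus frozen flag) for every
// unresolved relation via LogicalPlan.forEachUp. The walk-and-collect shape it relies
// on, reduced to plain Java (Node is a hypothetical stand-in for the plan tree;
// forEachUp mirrors the post-order traversal used in the diff):
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class PreAnalysisDemo {
    static class Node {
        final String table;      // null for non-relation nodes
        final boolean frozen;
        final List<Node> children;

        Node(String table, boolean frozen, List<Node> children) {
            this.table = table;
            this.frozen = frozen;
            this.children = children;
        }

        // visit children first, then the node itself (post-order, like forEachUp)
        void forEachUp(Consumer<Node> action) {
            for (Node child : children) {
                child.forEachUp(action);
            }
            action.accept(this);
        }
    }

    public static void main(String[] args) {
        Node plan = new Node(null, false, List.of(
                new Node("logs", false, List.of()),
                new Node("archive", true, List.of())));

        List<String> tables = new ArrayList<>();
        plan.forEachUp(n -> {
            if (n.table != null) {
                tables.add(n.table + (n.frozen ? " [frozen]" : ""));
            }
        });
        System.out.println(tables); // prints [logs, archive [frozen]]
    }
}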
+ */ + +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.plan.TableIdentifier; + +public class TableInfo { + + private final TableIdentifier id; + private final boolean isFrozen; + + TableInfo(TableIdentifier id, boolean isFrozen) { + this.id = id; + this.isFrozen = isFrozen; + } + + public TableIdentifier id() { + return id; + } + + public boolean isFrozen() { + return isFrozen; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 8f51fa65b7463..2fb5028e987e8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DateEsField; @@ -42,7 +43,6 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; @@ -57,38 +57,39 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.action.ActionListener.wrap; public class IndexResolver { public enum IndexType { - - INDEX("BASE TABLE"), - ALIAS("VIEW"), + STANDARD_INDEX("BASE TABLE", "INDEX"), + ALIAS("VIEW", "ALIAS"), + FROZEN_INDEX("BASE TABLE", "FROZEN INDEX"), // value for user types unrecognized - UNKNOWN("UNKNOWN"); + UNKNOWN("UNKNOWN", "UNKNOWN"); + + public static final String SQL_BASE_TABLE = "BASE TABLE"; + public static final String SQL_TABLE = "TABLE"; + public static final String SQL_VIEW = "VIEW"; - public static final EnumSet VALID = EnumSet.of(INDEX, ALIAS); + public static final EnumSet VALID_INCLUDE_FROZEN = EnumSet.of(STANDARD_INDEX, ALIAS, FROZEN_INDEX); + public static final EnumSet VALID_REGULAR = EnumSet.of(STANDARD_INDEX, ALIAS); + public static final EnumSet INDICES_ONLY = EnumSet.of(STANDARD_INDEX, FROZEN_INDEX); private final String toSql; + private final String toNative; - IndexType(String sql) { + IndexType(String sql, String toNative) { this.toSql = sql; + this.toNative = toNative; } public String toSql() { return toSql; } - - public static IndexType from(String name) { - if (name != null) { - name = name.toUpperCase(Locale.ROOT); - for (IndexType type : IndexType.VALID) { - if (type.toSql.equals(name)) { - return type; - } - } - } - return IndexType.UNKNOWN; + + public String toNative() { + return toNative; } } @@ -136,7 +137,17 @@ public boolean equals(Object obj) { } private static final IndicesOptions INDICES_ONLY_OPTIONS = new IndicesOptions( + EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE, Option.IGNORE_ALIASES, Option.IGNORE_THROTTLED), + EnumSet.of(WildcardStates.OPEN)); + private static final IndicesOptions FROZEN_INDICES_OPTIONS = new IndicesOptions( EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE, Option.IGNORE_ALIASES), EnumSet.of(WildcardStates.OPEN)); + + public static final IndicesOptions FIELD_CAPS_INDICES_OPTIONS = 
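// The IndicesOptions constants above differ essentially by one option: frozen indices
// are search-throttled, and IGNORE_THROTTLED filters throttled indices out of wildcard
// expansion, so the "include frozen" variants simply drop it. Sketched with a local
// enum (the real Option enum lives inside IndicesOptions; this is an illustration):
import java.util.EnumSet;

public class OptionsDemo {
    enum Option { ALLOW_NO_INDICES, IGNORE_UNAVAILABLE, IGNORE_ALIASES, IGNORE_THROTTLED }

    public static void main(String[] args) {
        EnumSet<Option> regular =
                EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE, Option.IGNORE_THROTTLED);
        // include-frozen variant: identical, minus the throttled filter
        EnumSet<Option> includeFrozen = EnumSet.copyOf(regular);
        includeFrozen.remove(Option.IGNORE_THROTTLED);
        System.out.println("regular: " + regular);
        System.out.println("frozen:  " + includeFrozen);
    }
}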
new IndicesOptions( + EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE, Option.IGNORE_THROTTLED), EnumSet.of(WildcardStates.OPEN)); + public static final IndicesOptions FIELD_CAPS_FROZEN_INDICES_OPTIONS = new IndicesOptions( + EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE), EnumSet.of(WildcardStates.OPEN)); + + private static final List FIELD_NAMES_BLACKLIST = Arrays.asList("_size"); private static final String UNMAPPED = "unmapped"; @@ -161,7 +172,8 @@ public void resolveNames(String indexWildcard, String javaRegex, EnumSet - resolveIndices(indices, javaRegex, aliases, retrieveIndices, listener), + client.admin().indices().getAliases(aliasRequest, wrap(aliases -> + resolveIndices(indices, javaRegex, aliases, retrieveIndices, retrieveFrozenIndices, listener), ex -> { // with security, two exception can be thrown: // INFE - if no alias matches @@ -179,36 +191,46 @@ public void resolveNames(String indexWildcard, String javaRegex, EnumSet> listener) { + boolean retrieveIndices, boolean retrieveFrozenIndices, ActionListener> listener) { - if (retrieveIndices) { + if (retrieveIndices || retrieveFrozenIndices) { + GetIndexRequest indexRequest = new GetIndexRequest() .local(true) .indices(indices) .features(Feature.SETTINGS) .includeDefaults(false) .indicesOptions(INDICES_ONLY_OPTIONS); + + // if frozen indices are requested, make sure to update the request accordingly + if (retrieveFrozenIndices) { + indexRequest.indicesOptions(FROZEN_INDICES_OPTIONS); + } client.admin().indices().getIndex(indexRequest, - ActionListener.wrap(response -> filterResults(javaRegex, aliases, response, listener), + wrap(response -> filterResults(javaRegex, aliases, response, retrieveIndices, retrieveFrozenIndices, listener), listener::onFailure)); + } else { - filterResults(javaRegex, aliases, null, listener); + filterResults(javaRegex, aliases, null, false, false, listener); } } private void filterResults(String javaRegex, GetAliasesResponse aliases, GetIndexResponse indices, + // these are needed to filter out the different results from the same index response + boolean retrieveIndices, + boolean retrieveFrozenIndices, ActionListener> listener) { // since the index name does not support ?, filter the results manually @@ -226,12 +248,16 @@ private void filterResults(String javaRegex, GetAliasesResponse aliases, GetInde } } } + // filter indices (if present) String[] indicesNames = indices != null ? indices.indices() : null; if (indicesNames != null) { for (String indexName : indicesNames) { + boolean isFrozen = retrieveFrozenIndices + && IndexSettings.INDEX_SEARCH_THROTTLED.get(indices.getSettings().get(indexName)) == Boolean.TRUE; + if (pattern == null || pattern.matcher(indexName).matches()) { - result.add(new IndexInfo(indexName, IndexType.INDEX)); + result.add(new IndexInfo(indexName, isFrozen ? IndexType.FROZEN_INDEX : IndexType.STANDARD_INDEX)); } } } @@ -242,8 +268,9 @@ private void filterResults(String javaRegex, GetAliasesResponse aliases, GetInde /** * Resolves a pattern to one (potentially compound meaning that spawns multiple indices) mapping. 
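// filterResults() above classifies each concrete index as FROZEN_INDEX or STANDARD_INDEX
// by reading the index.search.throttled setting (IndexSettings.INDEX_SEARCH_THROTTLED)
// out of the GetIndex response. The shape of that check, reduced to plain maps (the
// response types here are stand-ins, not the Elasticsearch API):
import java.util.Map;

public class FrozenCheckDemo {
    static boolean isFrozen(Map<String, String> indexSettings) {
        return Boolean.parseBoolean(indexSettings.getOrDefault("index.search.throttled", "false"));
    }

    public static void main(String[] args) {
        Map<String, Map<String, String>> settingsByIndex = Map.of(
                "logs", Map.of(),
                "archive", Map.of("index.search.throttled", "true"));
        settingsByIndex.forEach((index, settings) ->
                System.out.println(index + " -> " + (isFrozen(settings) ? "FROZEN INDEX" : "INDEX")));
    }
}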
*/ - public void resolveAsMergedMapping(String indexWildcard, String javaRegex, ActionListener listener) { - FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard); + public void resolveAsMergedMapping(String indexWildcard, String javaRegex, boolean includeFrozen, + ActionListener listener) { + FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, includeFrozen); client.fieldCaps(fieldRequest, ActionListener.wrap( response -> listener.onResponse(mergedMappings(indexWildcard, response.getIndices(), response.get())), @@ -287,7 +314,7 @@ static IndexResolution mergedMappings(String indexPattern, String[] indexNames, // type is okay, check aggregation else { FieldCapabilities fieldCap = types.values().iterator().next(); - + // validate search/agg-able if (fieldCap.isAggregatable() && fieldCap.nonAggregatableIndices() != null) { errorMessage.append("mapped as aggregatable except in "); @@ -305,16 +332,16 @@ static IndexResolution mergedMappings(String indexPattern, String[] indexNames, return new InvalidMappedField(n, errorMessage.toString()); } } - + // everything checks return null; }); - + if (indices.size() != 1) { throw new SqlIllegalArgumentException("Incorrect merging of mappings (likely due to a bug) - expect 1 but found [{}]", indices.size()); } - + return IndexResolution.valid(indices.get(0)); } @@ -356,7 +383,7 @@ private static EsField createField(String fieldName, Map> listener) { - FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard); + public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, boolean includeFrozen, + ActionListener> listener) { + FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, includeFrozen); client.fieldCaps(fieldRequest, ActionListener.wrap( response -> listener.onResponse(separateMappings(indexWildcard, javaRegex, response.getIndices(), response.get())), listener::onFailure)); - + } static List separateMappings(String indexPattern, String javaRegex, String[] indexNames, @@ -430,7 +467,7 @@ private static List buildIndices(String[] indexNames, String javaRegex, for (Entry> entry : sortedFields) { String fieldName = entry.getKey(); Map types = entry.getValue(); - + // ignore size added by the mapper plugin if (FIELD_NAMES_BLACKLIST.contains(fieldName)) { continue; @@ -438,7 +475,7 @@ private static List buildIndices(String[] indexNames, String javaRegex, // apply verification final InvalidMappedField invalidField = validityVerifier.apply(fieldName, types); - + // filter meta fields and unmapped FieldCapabilities unmapped = types.get(UNMAPPED); Set unmappedIndices = unmapped != null ? new HashSet<>(asList(unmapped.indices())) : emptySet(); @@ -447,7 +484,7 @@ private static List buildIndices(String[] indexNames, String javaRegex, for (Entry typeEntry : types.entrySet()) { FieldCapabilities typeCap = typeEntry.getValue(); String[] capIndices = typeCap.indices(); - + // Skip internal fields (name starting with underscore and its type reported by field_caps starts // with underscore as well). A meta field named "_version", for example, has the type named "_version". 
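// mergedMappings()/separateMappings() above walk the field_caps response and reject any
// field whose capabilities disagree across the matched indices. The core of that
// validation, with plain collections standing in for FieldCapabilities (a sketch of the
// idea, not the real merging logic):
import java.util.List;
import java.util.Map;

public class FieldCapsMergeDemo {
    // returns an error message, or null when the field is mapped consistently
    static String validate(String field, Map<String, List<String>> indicesByType) {
        if (indicesByType.size() > 1) {
            return field + " is mapped as [" + indicesByType.size() + "] incompatible types: "
                    + indicesByType.keySet();
        }
        return null; // single type across all indices: usable as-is
    }

    public static void main(String[] args) {
        System.out.println(validate("status",
                Map.of("keyword", List.of("logs"), "text", List.of("archive"))));
        System.out.println(validate("@timestamp",
                Map.of("date", List.of("logs", "archive"))));
    }
}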
if (typeEntry.getKey().startsWith("_") && typeCap.getType().startsWith("_")) { @@ -483,9 +520,9 @@ private static List buildIndices(String[] indexNames, String javaRegex, } EsField field = indexFields.flattedMapping.get(fieldName); if (field == null || (invalidField != null && (field instanceof InvalidMappedField) == false)) { - createField(fieldName, fieldCaps, indexFields.hierarchicalMapping, indexFields.flattedMapping, s -> - invalidField != null ? invalidField : - createField(s, typeCap.getType(), emptyMap(), typeCap.isAggregatable())); + createField(fieldName, fieldCaps, indexFields.hierarchicalMapping, indexFields.flattedMapping, + s -> invalidField != null ? invalidField : createField(s, typeCap.getType(), emptyMap(), + typeCap.isAggregatable())); } } } @@ -500,14 +537,4 @@ private static List buildIndices(String[] indexNames, String javaRegex, foundIndices.sort(Comparator.comparing(EsIndex::name)); return foundIndices; } - - private static FieldCapabilitiesRequest createFieldCapsRequest(String index) { - return new FieldCapabilitiesRequest() - .indices(Strings.commaDelimitedListToStringArray(index)) - .fields("*") - .includeUnmapped(true) - //lenient because we throw our own errors looking at the response e.g. if something was not resolved - //also because this way security doesn't throw authorization exceptions but rather honors ignore_unavailable - .indicesOptions(IndicesOptions.lenientExpandOpen()); - } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java index b09e98d11c17d..34fab72ca1385 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java @@ -52,13 +52,16 @@ public class CompositeAggregationCursor implements Cursor { private final List extractors; private final BitSet mask; private final int limit; + private final boolean includeFrozen; - CompositeAggregationCursor(byte[] next, List exts, BitSet mask, int remainingLimit, String... indices) { + CompositeAggregationCursor(byte[] next, List exts, BitSet mask, int remainingLimit, boolean includeFrozen, + String... 
indices) { this.indices = indices; this.nextQuery = next; this.extractors = exts; this.mask = mask; this.limit = remainingLimit; + this.includeFrozen = includeFrozen; } public CompositeAggregationCursor(StreamInput in) throws IOException { @@ -68,6 +71,7 @@ public CompositeAggregationCursor(StreamInput in) throws IOException { extractors = in.readNamedWriteableList(BucketExtractor.class); mask = BitSet.valueOf(in.readByteArray()); + includeFrozen = in.readBoolean(); } @Override @@ -78,6 +82,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteableList(extractors); out.writeByteArray(mask.toByteArray()); + out.writeBoolean(includeFrozen); + } @Override @@ -105,6 +111,10 @@ int limit() { return limit; } + boolean includeFrozen() { + return includeFrozen; + } + @Override public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { SearchSourceBuilder q; @@ -120,7 +130,7 @@ public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry re log.trace("About to execute composite query {} on {}", StringUtils.toString(query), indices); } - SearchRequest search = Querier.prepareRequest(client, query, cfg.pageTimeout(), indices); + SearchRequest search = Querier.prepareRequest(client, query, cfg.pageTimeout(), includeFrozen, indices); client.search(search, new ActionListener() { @Override @@ -134,7 +144,8 @@ public void onResponse(SearchResponse r) { } updateCompositeAfterKey(r, query); - CompositeAggsRowSet rowSet = new CompositeAggsRowSet(extractors, mask, r, limit, serializeQuery(query), indices); + CompositeAggsRowSet rowSet = new CompositeAggsRowSet(extractors, mask, r, limit, serializeQuery(query), includeFrozen, + indices); listener.onResponse(rowSet); } catch (Exception ex) { listener.onFailure(ex); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java index fbbc839fe1c76..f93e00eac5ac9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java @@ -28,7 +28,8 @@ class CompositeAggsRowSet extends ResultRowSet { private final int size; private int row = 0; - CompositeAggsRowSet(List exts, BitSet mask, SearchResponse response, int limit, byte[] next, String... indices) { + CompositeAggsRowSet(List exts, BitSet mask, SearchResponse response, int limit, byte[] next, + boolean includeFrozen, String... 
indices) { super(exts, mask); CompositeAggregation composite = CompositeAggregationCursor.getComposite(response); @@ -53,7 +54,7 @@ class CompositeAggsRowSet extends ResultRowSet { if (next == null || size == 0 || remainingLimit == 0) { cursor = Cursor.EMPTY; } else { - cursor = new CompositeAggregationCursor(next, exts, mask, remainingLimit, indices); + cursor = new CompositeAggregationCursor(next, exts, mask, remainingLimit, includeFrozen, indices); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index fec7000a78780..17e5a79fa4a71 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filters; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; import org.elasticsearch.xpack.sql.execution.PlanExecutor; import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; import org.elasticsearch.xpack.sql.execution.search.extractor.CompositeKeyExtractor; @@ -108,7 +109,8 @@ public void query(List output, QueryContainer query, String index, Ac log.trace("About to execute query {} on {}", StringUtils.toString(sourceBuilder), index); } - SearchRequest search = prepareRequest(client, sourceBuilder, timeout, Strings.commaDelimitedListToStringArray(index)); + SearchRequest search = prepareRequest(client, sourceBuilder, timeout, query.shouldIncludeFrozen(), + Strings.commaDelimitedListToStringArray(index)); @SuppressWarnings("rawtypes") List> sortingColumns = query.sortingColumns(); @@ -130,13 +132,16 @@ public void query(List output, QueryContainer query, String index, Ac client.search(search, l); } - public static SearchRequest prepareRequest(Client client, SearchSourceBuilder source, TimeValue timeout, String... indices) { + public static SearchRequest prepareRequest(Client client, SearchSourceBuilder source, TimeValue timeout, boolean includeFrozen, + String... indices) { SearchRequest search = client.prepareSearch(indices) // always track total hits accurately .setTrackTotalHits(true) .setAllowPartialSearchResults(false) .setSource(source) .setTimeout(timeout) + .setIndicesOptions( + includeFrozen ? IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS) .request(); return search; } @@ -175,7 +180,7 @@ class LocalAggregationSorterListener implements ActionListener { } } - this.data = new PriorityQueue, Integer>>(size) { + this.data = new PriorityQueue<>(size) { // compare row based on the received attribute sort // if a sort item is not in the list, it is assumed the sorting happened in ES @@ -389,6 +394,7 @@ protected void handleResponse(SearchResponse response, ActionListener exts, BitSet mask, SearchResponse response, int limitAggs, byte[] next, + boolean includeFrozen, String... 
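// The cursor plumbing above exists so that paging keeps the original query semantics:
// the includeFrozen flag captured at query time is serialized into the cursor and fed
// back through Querier.prepareRequest() for every next-page search. The state-threading
// shape, with hypothetical types:
public class CursorDemo {
    static final class PageCursor {
        final int remainingLimit;
        final boolean includeFrozen; // captured once, reused for every following page

        PageCursor(int remainingLimit, boolean includeFrozen) {
            this.remainingLimit = remainingLimit;
            this.includeFrozen = includeFrozen;
        }

        PageCursor nextPage(int rowsReturned) {
            // each page consumes part of the limit but never alters the frozen setting
            return new PageCursor(remainingLimit - rowsReturned, includeFrozen);
        }
    }

    public static void main(String[] args) {
        PageCursor cursor = new PageCursor(1000, true).nextPage(100).nextPage(100);
        System.out.println(cursor.remainingLimit + ", includeFrozen=" + cursor.includeFrozen);
    }
}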
indices) { - super(exts, mask, response, limitAggs, next, indices); + super(exts, mask, response, limitAggs, next, includeFrozen, indices); this.schema = schema; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index ba2a39069953a..6de27b7776338 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -125,7 +125,7 @@ public Object visitShowFunctions(ShowFunctionsContext ctx) { public Object visitShowTables(ShowTablesContext ctx) { TableIdentifier ti = visitTableIdentifier(ctx.tableIdent); String index = ti != null ? ti.qualifiedIndex() : null; - return new ShowTables(source(ctx), index, visitLikePattern(ctx.likePattern())); + return new ShowTables(source(ctx), index, visitLikePattern(ctx.likePattern()), ctx.FROZEN() != null); } @Override @@ -137,7 +137,7 @@ public Object visitShowSchemas(ShowSchemasContext ctx) { public Object visitShowColumns(ShowColumnsContext ctx) { TableIdentifier ti = visitTableIdentifier(ctx.tableIdent); String index = ti != null ? ti.qualifiedIndex() : null; - return new ShowColumns(source(ctx), index, visitLikePattern(ctx.likePattern())); + return new ShowColumns(source(ctx), index, visitLikePattern(ctx.likePattern()), ctx.FROZEN() != null); } @Override @@ -154,12 +154,21 @@ public SysTables visitSysTables(SysTablesContext ctx) { } // special case for legacy apps (like msquery) that always asks for 'TABLE' // which we manually map to all concrete tables supported - else if (value.toUpperCase(Locale.ROOT).equals("TABLE")) { - legacyTableType = true; - types.add(IndexType.INDEX); - } else { - IndexType type = IndexType.from(value); - types.add(type); + else { + switch (value.toUpperCase(Locale.ROOT)) { + case IndexType.SQL_TABLE: + legacyTableType = true; + types.add(IndexType.STANDARD_INDEX); + break; + case IndexType.SQL_BASE_TABLE: + types.add(IndexType.STANDARD_INDEX); + break; + case IndexType.SQL_VIEW: + types.add(IndexType.ALIAS); + break; + default: + types.add(IndexType.UNKNOWN); + } } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java index f27368912c1d2..429e572878f55 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -211,6 +211,6 @@ public Object visitSubquery(SubqueryContext ctx) { public LogicalPlan visitTableName(TableNameContext ctx) { String alias = visitQualifiedName(ctx.qualifiedName()); TableIdentifier tableIdentifier = visitTableIdentifier(ctx.tableIdentifier()); - return new UnresolvedRelation(source(ctx), tableIdentifier, alias); + return new UnresolvedRelation(source(ctx), tableIdentifier, alias, ctx.FROZEN() != null); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java index a537f9f369e92..de8afac152686 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java @@ -1,16 +1,13 @@ // ANTLR GENERATED CODE: DO 
NOT EDIT package org.elasticsearch.xpack.sql.parser; - -import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.RuntimeMetaData; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.*; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) class SqlBaseLexer extends Lexer { @@ -25,21 +22,21 @@ class SqlBaseLexer extends Lexer { COLUMNS=18, CONVERT=19, CURRENT_DATE=20, CURRENT_TIME=21, CURRENT_TIMESTAMP=22, DAY=23, DAYS=24, DEBUG=25, DESC=26, DESCRIBE=27, DISTINCT=28, ELSE=29, END=30, ESCAPE=31, EXECUTABLE=32, EXISTS=33, EXPLAIN=34, EXTRACT=35, FALSE=36, - FIRST=37, FORMAT=38, FROM=39, FULL=40, FUNCTIONS=41, GRAPHVIZ=42, GROUP=43, - HAVING=44, HOUR=45, HOURS=46, IN=47, INNER=48, INTERVAL=49, IS=50, JOIN=51, - LAST=52, LEFT=53, LIKE=54, LIMIT=55, MAPPED=56, MATCH=57, MINUTE=58, MINUTES=59, - MONTH=60, MONTHS=61, NATURAL=62, NOT=63, NULL=64, NULLS=65, ON=66, OPTIMIZED=67, - OR=68, ORDER=69, OUTER=70, PARSED=71, PHYSICAL=72, PLAN=73, RIGHT=74, - RLIKE=75, QUERY=76, SCHEMAS=77, SECOND=78, SECONDS=79, SELECT=80, SHOW=81, - SYS=82, TABLE=83, TABLES=84, TEXT=85, THEN=86, TRUE=87, TO=88, TYPE=89, - TYPES=90, USING=91, VERIFY=92, WHEN=93, WHERE=94, WITH=95, YEAR=96, YEARS=97, - ESCAPE_ESC=98, FUNCTION_ESC=99, LIMIT_ESC=100, DATE_ESC=101, TIME_ESC=102, - TIMESTAMP_ESC=103, GUID_ESC=104, ESC_END=105, EQ=106, NULLEQ=107, NEQ=108, - LT=109, LTE=110, GT=111, GTE=112, PLUS=113, MINUS=114, ASTERISK=115, SLASH=116, - PERCENT=117, CAST_OP=118, CONCAT=119, DOT=120, PARAM=121, STRING=122, - INTEGER_VALUE=123, DECIMAL_VALUE=124, IDENTIFIER=125, DIGIT_IDENTIFIER=126, - TABLE_IDENTIFIER=127, QUOTED_IDENTIFIER=128, BACKQUOTED_IDENTIFIER=129, - SIMPLE_COMMENT=130, BRACKETED_COMMENT=131, WS=132, UNRECOGNIZED=133; + FIRST=37, FORMAT=38, FROM=39, FROZEN=40, FULL=41, FUNCTIONS=42, GRAPHVIZ=43, + GROUP=44, HAVING=45, HOUR=46, HOURS=47, IN=48, INCLUDE=49, INNER=50, INTERVAL=51, + IS=52, JOIN=53, LAST=54, LEFT=55, LIKE=56, LIMIT=57, MAPPED=58, MATCH=59, + MINUTE=60, MINUTES=61, MONTH=62, MONTHS=63, NATURAL=64, NOT=65, NULL=66, + NULLS=67, ON=68, OPTIMIZED=69, OR=70, ORDER=71, OUTER=72, PARSED=73, PHYSICAL=74, + PLAN=75, RIGHT=76, RLIKE=77, QUERY=78, SCHEMAS=79, SECOND=80, SECONDS=81, + SELECT=82, SHOW=83, SYS=84, TABLE=85, TABLES=86, TEXT=87, THEN=88, TRUE=89, + TO=90, TYPE=91, TYPES=92, USING=93, VERIFY=94, WHEN=95, WHERE=96, WITH=97, + YEAR=98, YEARS=99, ESCAPE_ESC=100, FUNCTION_ESC=101, LIMIT_ESC=102, DATE_ESC=103, + TIME_ESC=104, TIMESTAMP_ESC=105, GUID_ESC=106, ESC_END=107, EQ=108, NULLEQ=109, + NEQ=110, LT=111, LTE=112, GT=113, GTE=114, PLUS=115, MINUS=116, ASTERISK=117, + SLASH=118, PERCENT=119, CAST_OP=120, CONCAT=121, DOT=122, PARAM=123, STRING=124, + INTEGER_VALUE=125, DECIMAL_VALUE=126, IDENTIFIER=127, DIGIT_IDENTIFIER=128, + TABLE_IDENTIFIER=129, QUOTED_IDENTIFIER=130, BACKQUOTED_IDENTIFIER=131, + SIMPLE_COMMENT=132, BRACKETED_COMMENT=133, WS=134, UNRECOGNIZED=135; public static String[] modeNames = { "DEFAULT_MODE" }; @@ -50,20 +47,20 @@ class SqlBaseLexer extends 
Lexer { "CONVERT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DAY", "DAYS", "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ELSE", "END", "ESCAPE", "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FORMAT", - "FROM", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING", "HOUR", "HOURS", - "IN", "INNER", "INTERVAL", "IS", "JOIN", "LAST", "LEFT", "LIKE", "LIMIT", - "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH", "MONTHS", "NATURAL", - "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR", "ORDER", "OUTER", "PARSED", - "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY", "SCHEMAS", "SECOND", "SECONDS", - "SELECT", "SHOW", "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", - "TYPE", "TYPES", "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", - "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", - "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", - "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT", - "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER", - "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", - "EXPONENT", "DIGIT", "LETTER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", - "WS", "UNRECOGNIZED" + "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING", + "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS", "JOIN", "LAST", + "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH", + "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR", + "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY", + "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES", + "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN", + "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", + "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", "GUID_ESC", "ESC_END", "EQ", + "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "CAST_OP", "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", + "DECIMAL_VALUE", "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", + "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", "EXPONENT", "DIGIT", "LETTER", + "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED" }; private static final String[] _LITERAL_NAMES = { @@ -73,18 +70,18 @@ class SqlBaseLexer extends Lexer { "'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DAY'", "'DAYS'", "'DEBUG'", "'DESC'", "'DESCRIBE'", "'DISTINCT'", "'ELSE'", "'END'", "'ESCAPE'", "'EXECUTABLE'", "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FIRST'", "'FORMAT'", - "'FROM'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", "'HAVING'", - "'HOUR'", "'HOURS'", "'IN'", "'INNER'", "'INTERVAL'", "'IS'", "'JOIN'", - "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MAPPED'", "'MATCH'", "'MINUTE'", - "'MINUTES'", "'MONTH'", "'MONTHS'", "'NATURAL'", "'NOT'", "'NULL'", "'NULLS'", - "'ON'", "'OPTIMIZED'", "'OR'", "'ORDER'", "'OUTER'", "'PARSED'", "'PHYSICAL'", - "'PLAN'", "'RIGHT'", "'RLIKE'", "'QUERY'", "'SCHEMAS'", "'SECOND'", "'SECONDS'", - "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", - "'TRUE'", "'TO'", "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", "'WHEN'", - "'WHERE'", "'WITH'", "'YEAR'", "'YEARS'", "'{ESCAPE'", "'{FN'", "'{LIMIT'", - "'{D'", "'{T'", "'{TS'", "'{GUID'", "'}'", "'='", "'<=>'", null, "'<'", - "'<='", "'>'", "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", "'::'", "'||'", - "'.'", "'?'" + "'FROM'", "'FROZEN'", "'FULL'", 
"'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", + "'HAVING'", "'HOUR'", "'HOURS'", "'IN'", "'INCLUDE'", "'INNER'", "'INTERVAL'", + "'IS'", "'JOIN'", "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MAPPED'", + "'MATCH'", "'MINUTE'", "'MINUTES'", "'MONTH'", "'MONTHS'", "'NATURAL'", + "'NOT'", "'NULL'", "'NULLS'", "'ON'", "'OPTIMIZED'", "'OR'", "'ORDER'", + "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", "'RIGHT'", "'RLIKE'", "'QUERY'", + "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", + "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'", "'TYPE'", "'TYPES'", + "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'", "'YEAR'", "'YEARS'", + "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'", "'{TS'", "'{GUID'", + "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'", "'>='", "'+'", "'-'", + "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'" }; private static final String[] _SYMBOLIC_NAMES = { null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", @@ -92,19 +89,20 @@ class SqlBaseLexer extends Lexer { "CONVERT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DAY", "DAYS", "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ELSE", "END", "ESCAPE", "EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FORMAT", - "FROM", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING", "HOUR", "HOURS", - "IN", "INNER", "INTERVAL", "IS", "JOIN", "LAST", "LEFT", "LIKE", "LIMIT", - "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH", "MONTHS", "NATURAL", - "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR", "ORDER", "OUTER", "PARSED", - "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY", "SCHEMAS", "SECOND", "SECONDS", - "SELECT", "SHOW", "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", - "TYPE", "TYPES", "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", - "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", - "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", - "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT", - "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER", - "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", - "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED" + "FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING", + "HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS", "JOIN", "LAST", + "LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH", + "MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR", + "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY", + "SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES", + "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN", + "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", + "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", "GUID_ESC", "ESC_END", "EQ", + "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "CAST_OP", "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", + "DECIMAL_VALUE", "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", + "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", + "WS", "UNRECOGNIZED" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -161,7 +159,7 @@ public SqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - 
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u0087\u045e\b\1\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u0089\u0471\b\1\4"+ "\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+ "\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+ "\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+ @@ -177,375 +175,384 @@ public SqlBaseLexer(CharStream input) { "\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080\t"+ "\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084"+ "\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089"+ - "\t\u0089\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3"+ - "\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t"+ - "\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3"+ - "\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20"+ - "\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22"+ - "\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24"+ - "\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25"+ - "\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26"+ - "\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27"+ - "\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31\3\31"+ - "\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33"+ - "\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35"+ - "\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3 \3"+ - " \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\""+ - "\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3"+ - "%\3%\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3)\3"+ - ")\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3+\3+\3+\3+\3"+ - ",\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3"+ - "/\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62"+ - "\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\65\3\65"+ - "\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3\67\3\67\38\3"+ - "8\38\38\38\38\39\39\39\39\39\39\39\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3"+ - ";\3;\3<\3<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3"+ - "?\3?\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3B\3"+ - "C\3C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3F\3F\3F\3F\3F\3F\3G\3"+ - "G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3I\3I\3J\3J\3"+ - "J\3J\3J\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3N\3N\3"+ - "N\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3P\3P\3Q\3Q\3"+ - "Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3S\3S\3S\3S\3T\3T\3T\3T\3T\3T\3U\3U\3U\3"+ - "U\3U\3U\3U\3V\3V\3V\3V\3V\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Z\3"+ - "Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]\3"+ - "]\3]\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a\3"+ - "b\3b\3b\3b\3b\3b\3c\3c\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3e\3e\3e\3e\3e\3"+ - "e\3e\3f\3f\3f\3g\3g\3g\3h\3h\3h\3h\3i\3i\3i\3i\3i\3i\3j\3j\3k\3k\3l\3"+ - "l\3l\3l\3m\3m\3m\3m\5m\u039c\nm\3n\3n\3o\3o\3o\3p\3p\3q\3q\3q\3r\3r\3"+ - "s\3s\3t\3t\3u\3u\3v\3v\3w\3w\3w\3x\3x\3x\3y\3y\3z\3z\3{\3{\3{\3{\7{\u03c0"+ - "\n{\f{\16{\u03c3\13{\3{\3{\3|\6|\u03c8\n|\r|\16|\u03c9\3}\6}\u03cd\n}"+ - "\r}\16}\u03ce\3}\3}\7}\u03d3\n}\f}\16}\u03d6\13}\3}\3}\6}\u03da\n}\r}"+ - 
"\16}\u03db\3}\6}\u03df\n}\r}\16}\u03e0\3}\3}\7}\u03e5\n}\f}\16}\u03e8"+ - "\13}\5}\u03ea\n}\3}\3}\3}\3}\6}\u03f0\n}\r}\16}\u03f1\3}\3}\5}\u03f6\n"+ - "}\3~\3~\5~\u03fa\n~\3~\3~\3~\7~\u03ff\n~\f~\16~\u0402\13~\3\177\3\177"+ - "\3\177\3\177\6\177\u0408\n\177\r\177\16\177\u0409\3\u0080\3\u0080\3\u0080"+ - "\6\u0080\u040f\n\u0080\r\u0080\16\u0080\u0410\3\u0081\3\u0081\3\u0081"+ - "\3\u0081\7\u0081\u0417\n\u0081\f\u0081\16\u0081\u041a\13\u0081\3\u0081"+ - "\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082\7\u0082\u0422\n\u0082\f\u0082"+ - "\16\u0082\u0425\13\u0082\3\u0082\3\u0082\3\u0083\3\u0083\5\u0083\u042b"+ - "\n\u0083\3\u0083\6\u0083\u042e\n\u0083\r\u0083\16\u0083\u042f\3\u0084"+ - "\3\u0084\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086\3\u0086\7\u0086\u043a"+ - "\n\u0086\f\u0086\16\u0086\u043d\13\u0086\3\u0086\5\u0086\u0440\n\u0086"+ - "\3\u0086\5\u0086\u0443\n\u0086\3\u0086\3\u0086\3\u0087\3\u0087\3\u0087"+ - "\3\u0087\3\u0087\7\u0087\u044c\n\u0087\f\u0087\16\u0087\u044f\13\u0087"+ - "\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0088\6\u0088\u0457\n\u0088"+ - "\r\u0088\16\u0088\u0458\3\u0088\3\u0088\3\u0089\3\u0089\3\u044d\2\u008a"+ - "\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20"+ - "\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37"+ - "= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o"+ - "9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH"+ - "\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009fQ\u00a1"+ - "R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5"+ - "\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9"+ - "f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00dd"+ - "p\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00edx\u00efy\u00f1"+ - "z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101"+ - "\u0082\u0103\u0083\u0105\2\u0107\2\u0109\2\u010b\u0084\u010d\u0085\u010f"+ - "\u0086\u0111\u0087\3\2\13\3\2))\4\2BBaa\3\2$$\3\2bb\4\2--//\3\2\62;\3"+ - "\2C\\\4\2\f\f\17\17\5\2\13\f\17\17\"\"\u047e\2\3\3\2\2\2\2\5\3\2\2\2\2"+ - "\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2"+ - "\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2"+ - "\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2"+ - "\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2"+ - "\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2"+ - "\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2"+ - "M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3"+ - "\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2"+ - "\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2"+ - "s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177"+ - "\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2"+ - "\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091"+ - "\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2"+ - "\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3"+ - "\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2"+ - "\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5"+ - "\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2"+ - "\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7"+ - 
"\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2"+ - "\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9"+ - "\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2"+ - "\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb"+ - "\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2"+ - "\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd"+ - "\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u010b\3\2\2"+ - "\2\2\u010d\3\2\2\2\2\u010f\3\2\2\2\2\u0111\3\2\2\2\3\u0113\3\2\2\2\5\u0115"+ - "\3\2\2\2\7\u0117\3\2\2\2\t\u0119\3\2\2\2\13\u011b\3\2\2\2\r\u011f\3\2"+ - "\2\2\17\u0127\3\2\2\2\21\u0130\3\2\2\2\23\u0134\3\2\2\2\25\u0138\3\2\2"+ - "\2\27\u013b\3\2\2\2\31\u013f\3\2\2\2\33\u0147\3\2\2\2\35\u014a\3\2\2\2"+ - "\37\u014f\3\2\2\2!\u0154\3\2\2\2#\u015c\3\2\2\2%\u0165\3\2\2\2\'\u016d"+ - "\3\2\2\2)\u0175\3\2\2\2+\u0182\3\2\2\2-\u018f\3\2\2\2/\u01a1\3\2\2\2\61"+ - "\u01a5\3\2\2\2\63\u01aa\3\2\2\2\65\u01b0\3\2\2\2\67\u01b5\3\2\2\29\u01be"+ - "\3\2\2\2;\u01c7\3\2\2\2=\u01cc\3\2\2\2?\u01d0\3\2\2\2A\u01d7\3\2\2\2C"+ - "\u01e2\3\2\2\2E\u01e9\3\2\2\2G\u01f1\3\2\2\2I\u01f9\3\2\2\2K\u01ff\3\2"+ - "\2\2M\u0205\3\2\2\2O\u020c\3\2\2\2Q\u0211\3\2\2\2S\u0216\3\2\2\2U\u0220"+ - "\3\2\2\2W\u0229\3\2\2\2Y\u022f\3\2\2\2[\u0236\3\2\2\2]\u023b\3\2\2\2_"+ - "\u0241\3\2\2\2a\u0244\3\2\2\2c\u024a\3\2\2\2e\u0253\3\2\2\2g\u0256\3\2"+ - "\2\2i\u025b\3\2\2\2k\u0260\3\2\2\2m\u0265\3\2\2\2o\u026a\3\2\2\2q\u0270"+ - "\3\2\2\2s\u0277\3\2\2\2u\u027d\3\2\2\2w\u0284\3\2\2\2y\u028c\3\2\2\2{"+ - "\u0292\3\2\2\2}\u0299\3\2\2\2\177\u02a1\3\2\2\2\u0081\u02a5\3\2\2\2\u0083"+ - "\u02aa\3\2\2\2\u0085\u02b0\3\2\2\2\u0087\u02b3\3\2\2\2\u0089\u02bd\3\2"+ - "\2\2\u008b\u02c0\3\2\2\2\u008d\u02c6\3\2\2\2\u008f\u02cc\3\2\2\2\u0091"+ - "\u02d3\3\2\2\2\u0093\u02dc\3\2\2\2\u0095\u02e1\3\2\2\2\u0097\u02e7\3\2"+ - "\2\2\u0099\u02ed\3\2\2\2\u009b\u02f3\3\2\2\2\u009d\u02fb\3\2\2\2\u009f"+ - "\u0302\3\2\2\2\u00a1\u030a\3\2\2\2\u00a3\u0311\3\2\2\2\u00a5\u0316\3\2"+ - "\2\2\u00a7\u031a\3\2\2\2\u00a9\u0320\3\2\2\2\u00ab\u0327\3\2\2\2\u00ad"+ - "\u032c\3\2\2\2\u00af\u0331\3\2\2\2\u00b1\u0336\3\2\2\2\u00b3\u0339\3\2"+ - "\2\2\u00b5\u033e\3\2\2\2\u00b7\u0344\3\2\2\2\u00b9\u034a\3\2\2\2\u00bb"+ - "\u0351\3\2\2\2\u00bd\u0356\3\2\2\2\u00bf\u035c\3\2\2\2\u00c1\u0361\3\2"+ - "\2\2\u00c3\u0366\3\2\2\2\u00c5\u036c\3\2\2\2\u00c7\u0374\3\2\2\2\u00c9"+ - "\u0378\3\2\2\2\u00cb\u037f\3\2\2\2\u00cd\u0382\3\2\2\2\u00cf\u0385\3\2"+ - "\2\2\u00d1\u0389\3\2\2\2\u00d3\u038f\3\2\2\2\u00d5\u0391\3\2\2\2\u00d7"+ - "\u0393\3\2\2\2\u00d9\u039b\3\2\2\2\u00db\u039d\3\2\2\2\u00dd\u039f\3\2"+ - "\2\2\u00df\u03a2\3\2\2\2\u00e1\u03a4\3\2\2\2\u00e3\u03a7\3\2\2\2\u00e5"+ - "\u03a9\3\2\2\2\u00e7\u03ab\3\2\2\2\u00e9\u03ad\3\2\2\2\u00eb\u03af\3\2"+ - "\2\2\u00ed\u03b1\3\2\2\2\u00ef\u03b4\3\2\2\2\u00f1\u03b7\3\2\2\2\u00f3"+ - "\u03b9\3\2\2\2\u00f5\u03bb\3\2\2\2\u00f7\u03c7\3\2\2\2\u00f9\u03f5\3\2"+ - "\2\2\u00fb\u03f9\3\2\2\2\u00fd\u0403\3\2\2\2\u00ff\u040e\3\2\2\2\u0101"+ - "\u0412\3\2\2\2\u0103\u041d\3\2\2\2\u0105\u0428\3\2\2\2\u0107\u0431\3\2"+ - "\2\2\u0109\u0433\3\2\2\2\u010b\u0435\3\2\2\2\u010d\u0446\3\2\2\2\u010f"+ - "\u0456\3\2\2\2\u0111\u045c\3\2\2\2\u0113\u0114\7*\2\2\u0114\4\3\2\2\2"+ - "\u0115\u0116\7+\2\2\u0116\6\3\2\2\2\u0117\u0118\7.\2\2\u0118\b\3\2\2\2"+ - "\u0119\u011a\7<\2\2\u011a\n\3\2\2\2\u011b\u011c\7C\2\2\u011c\u011d\7N"+ - "\2\2\u011d\u011e\7N\2\2\u011e\f\3\2\2\2\u011f\u0120\7C\2\2\u0120\u0121"+ - 
"\7P\2\2\u0121\u0122\7C\2\2\u0122\u0123\7N\2\2\u0123\u0124\7[\2\2\u0124"+ - "\u0125\7\\\2\2\u0125\u0126\7G\2\2\u0126\16\3\2\2\2\u0127\u0128\7C\2\2"+ - "\u0128\u0129\7P\2\2\u0129\u012a\7C\2\2\u012a\u012b\7N\2\2\u012b\u012c"+ - "\7[\2\2\u012c\u012d\7\\\2\2\u012d\u012e\7G\2\2\u012e\u012f\7F\2\2\u012f"+ - "\20\3\2\2\2\u0130\u0131\7C\2\2\u0131\u0132\7P\2\2\u0132\u0133\7F\2\2\u0133"+ - "\22\3\2\2\2\u0134\u0135\7C\2\2\u0135\u0136\7P\2\2\u0136\u0137\7[\2\2\u0137"+ - "\24\3\2\2\2\u0138\u0139\7C\2\2\u0139\u013a\7U\2\2\u013a\26\3\2\2\2\u013b"+ - "\u013c\7C\2\2\u013c\u013d\7U\2\2\u013d\u013e\7E\2\2\u013e\30\3\2\2\2\u013f"+ - "\u0140\7D\2\2\u0140\u0141\7G\2\2\u0141\u0142\7V\2\2\u0142\u0143\7Y\2\2"+ - "\u0143\u0144\7G\2\2\u0144\u0145\7G\2\2\u0145\u0146\7P\2\2\u0146\32\3\2"+ - "\2\2\u0147\u0148\7D\2\2\u0148\u0149\7[\2\2\u0149\34\3\2\2\2\u014a\u014b"+ - "\7E\2\2\u014b\u014c\7C\2\2\u014c\u014d\7U\2\2\u014d\u014e\7G\2\2\u014e"+ - "\36\3\2\2\2\u014f\u0150\7E\2\2\u0150\u0151\7C\2\2\u0151\u0152\7U\2\2\u0152"+ - "\u0153\7V\2\2\u0153 \3\2\2\2\u0154\u0155\7E\2\2\u0155\u0156\7C\2\2\u0156"+ - "\u0157\7V\2\2\u0157\u0158\7C\2\2\u0158\u0159\7N\2\2\u0159\u015a\7Q\2\2"+ - "\u015a\u015b\7I\2\2\u015b\"\3\2\2\2\u015c\u015d\7E\2\2\u015d\u015e\7C"+ - "\2\2\u015e\u015f\7V\2\2\u015f\u0160\7C\2\2\u0160\u0161\7N\2\2\u0161\u0162"+ - "\7Q\2\2\u0162\u0163\7I\2\2\u0163\u0164\7U\2\2\u0164$\3\2\2\2\u0165\u0166"+ - "\7E\2\2\u0166\u0167\7Q\2\2\u0167\u0168\7N\2\2\u0168\u0169\7W\2\2\u0169"+ - "\u016a\7O\2\2\u016a\u016b\7P\2\2\u016b\u016c\7U\2\2\u016c&\3\2\2\2\u016d"+ - "\u016e\7E\2\2\u016e\u016f\7Q\2\2\u016f\u0170\7P\2\2\u0170\u0171\7X\2\2"+ - "\u0171\u0172\7G\2\2\u0172\u0173\7T\2\2\u0173\u0174\7V\2\2\u0174(\3\2\2"+ - "\2\u0175\u0176\7E\2\2\u0176\u0177\7W\2\2\u0177\u0178\7T\2\2\u0178\u0179"+ - "\7T\2\2\u0179\u017a\7G\2\2\u017a\u017b\7P\2\2\u017b\u017c\7V\2\2\u017c"+ - "\u017d\7a\2\2\u017d\u017e\7F\2\2\u017e\u017f\7C\2\2\u017f\u0180\7V\2\2"+ - "\u0180\u0181\7G\2\2\u0181*\3\2\2\2\u0182\u0183\7E\2\2\u0183\u0184\7W\2"+ - "\2\u0184\u0185\7T\2\2\u0185\u0186\7T\2\2\u0186\u0187\7G\2\2\u0187\u0188"+ - "\7P\2\2\u0188\u0189\7V\2\2\u0189\u018a\7a\2\2\u018a\u018b\7V\2\2\u018b"+ - "\u018c\7K\2\2\u018c\u018d\7O\2\2\u018d\u018e\7G\2\2\u018e,\3\2\2\2\u018f"+ - "\u0190\7E\2\2\u0190\u0191\7W\2\2\u0191\u0192\7T\2\2\u0192\u0193\7T\2\2"+ - "\u0193\u0194\7G\2\2\u0194\u0195\7P\2\2\u0195\u0196\7V\2\2\u0196\u0197"+ - "\7a\2\2\u0197\u0198\7V\2\2\u0198\u0199\7K\2\2\u0199\u019a\7O\2\2\u019a"+ - "\u019b\7G\2\2\u019b\u019c\7U\2\2\u019c\u019d\7V\2\2\u019d\u019e\7C\2\2"+ - "\u019e\u019f\7O\2\2\u019f\u01a0\7R\2\2\u01a0.\3\2\2\2\u01a1\u01a2\7F\2"+ - "\2\u01a2\u01a3\7C\2\2\u01a3\u01a4\7[\2\2\u01a4\60\3\2\2\2\u01a5\u01a6"+ - "\7F\2\2\u01a6\u01a7\7C\2\2\u01a7\u01a8\7[\2\2\u01a8\u01a9\7U\2\2\u01a9"+ - "\62\3\2\2\2\u01aa\u01ab\7F\2\2\u01ab\u01ac\7G\2\2\u01ac\u01ad\7D\2\2\u01ad"+ - "\u01ae\7W\2\2\u01ae\u01af\7I\2\2\u01af\64\3\2\2\2\u01b0\u01b1\7F\2\2\u01b1"+ - "\u01b2\7G\2\2\u01b2\u01b3\7U\2\2\u01b3\u01b4\7E\2\2\u01b4\66\3\2\2\2\u01b5"+ - "\u01b6\7F\2\2\u01b6\u01b7\7G\2\2\u01b7\u01b8\7U\2\2\u01b8\u01b9\7E\2\2"+ - "\u01b9\u01ba\7T\2\2\u01ba\u01bb\7K\2\2\u01bb\u01bc\7D\2\2\u01bc\u01bd"+ - "\7G\2\2\u01bd8\3\2\2\2\u01be\u01bf\7F\2\2\u01bf\u01c0\7K\2\2\u01c0\u01c1"+ - "\7U\2\2\u01c1\u01c2\7V\2\2\u01c2\u01c3\7K\2\2\u01c3\u01c4\7P\2\2\u01c4"+ - "\u01c5\7E\2\2\u01c5\u01c6\7V\2\2\u01c6:\3\2\2\2\u01c7\u01c8\7G\2\2\u01c8"+ - "\u01c9\7N\2\2\u01c9\u01ca\7U\2\2\u01ca\u01cb\7G\2\2\u01cb<\3\2\2\2\u01cc"+ - "\u01cd\7G\2\2\u01cd\u01ce\7P\2\2\u01ce\u01cf\7F\2\2\u01cf>\3\2\2\2\u01d0"+ - 
"\u01d1\7G\2\2\u01d1\u01d2\7U\2\2\u01d2\u01d3\7E\2\2\u01d3\u01d4\7C\2\2"+ - "\u01d4\u01d5\7R\2\2\u01d5\u01d6\7G\2\2\u01d6@\3\2\2\2\u01d7\u01d8\7G\2"+ - "\2\u01d8\u01d9\7Z\2\2\u01d9\u01da\7G\2\2\u01da\u01db\7E\2\2\u01db\u01dc"+ - "\7W\2\2\u01dc\u01dd\7V\2\2\u01dd\u01de\7C\2\2\u01de\u01df\7D\2\2\u01df"+ - "\u01e0\7N\2\2\u01e0\u01e1\7G\2\2\u01e1B\3\2\2\2\u01e2\u01e3\7G\2\2\u01e3"+ - "\u01e4\7Z\2\2\u01e4\u01e5\7K\2\2\u01e5\u01e6\7U\2\2\u01e6\u01e7\7V\2\2"+ - "\u01e7\u01e8\7U\2\2\u01e8D\3\2\2\2\u01e9\u01ea\7G\2\2\u01ea\u01eb\7Z\2"+ - "\2\u01eb\u01ec\7R\2\2\u01ec\u01ed\7N\2\2\u01ed\u01ee\7C\2\2\u01ee\u01ef"+ - "\7K\2\2\u01ef\u01f0\7P\2\2\u01f0F\3\2\2\2\u01f1\u01f2\7G\2\2\u01f2\u01f3"+ - "\7Z\2\2\u01f3\u01f4\7V\2\2\u01f4\u01f5\7T\2\2\u01f5\u01f6\7C\2\2\u01f6"+ - "\u01f7\7E\2\2\u01f7\u01f8\7V\2\2\u01f8H\3\2\2\2\u01f9\u01fa\7H\2\2\u01fa"+ - "\u01fb\7C\2\2\u01fb\u01fc\7N\2\2\u01fc\u01fd\7U\2\2\u01fd\u01fe\7G\2\2"+ - "\u01feJ\3\2\2\2\u01ff\u0200\7H\2\2\u0200\u0201\7K\2\2\u0201\u0202\7T\2"+ - "\2\u0202\u0203\7U\2\2\u0203\u0204\7V\2\2\u0204L\3\2\2\2\u0205\u0206\7"+ - "H\2\2\u0206\u0207\7Q\2\2\u0207\u0208\7T\2\2\u0208\u0209\7O\2\2\u0209\u020a"+ - "\7C\2\2\u020a\u020b\7V\2\2\u020bN\3\2\2\2\u020c\u020d\7H\2\2\u020d\u020e"+ - "\7T\2\2\u020e\u020f\7Q\2\2\u020f\u0210\7O\2\2\u0210P\3\2\2\2\u0211\u0212"+ - "\7H\2\2\u0212\u0213\7W\2\2\u0213\u0214\7N\2\2\u0214\u0215\7N\2\2\u0215"+ - "R\3\2\2\2\u0216\u0217\7H\2\2\u0217\u0218\7W\2\2\u0218\u0219\7P\2\2\u0219"+ - "\u021a\7E\2\2\u021a\u021b\7V\2\2\u021b\u021c\7K\2\2\u021c\u021d\7Q\2\2"+ - "\u021d\u021e\7P\2\2\u021e\u021f\7U\2\2\u021fT\3\2\2\2\u0220\u0221\7I\2"+ - "\2\u0221\u0222\7T\2\2\u0222\u0223\7C\2\2\u0223\u0224\7R\2\2\u0224\u0225"+ - "\7J\2\2\u0225\u0226\7X\2\2\u0226\u0227\7K\2\2\u0227\u0228\7\\\2\2\u0228"+ - "V\3\2\2\2\u0229\u022a\7I\2\2\u022a\u022b\7T\2\2\u022b\u022c\7Q\2\2\u022c"+ - "\u022d\7W\2\2\u022d\u022e\7R\2\2\u022eX\3\2\2\2\u022f\u0230\7J\2\2\u0230"+ - "\u0231\7C\2\2\u0231\u0232\7X\2\2\u0232\u0233\7K\2\2\u0233\u0234\7P\2\2"+ - "\u0234\u0235\7I\2\2\u0235Z\3\2\2\2\u0236\u0237\7J\2\2\u0237\u0238\7Q\2"+ - "\2\u0238\u0239\7W\2\2\u0239\u023a\7T\2\2\u023a\\\3\2\2\2\u023b\u023c\7"+ - "J\2\2\u023c\u023d\7Q\2\2\u023d\u023e\7W\2\2\u023e\u023f\7T\2\2\u023f\u0240"+ - "\7U\2\2\u0240^\3\2\2\2\u0241\u0242\7K\2\2\u0242\u0243\7P\2\2\u0243`\3"+ - "\2\2\2\u0244\u0245\7K\2\2\u0245\u0246\7P\2\2\u0246\u0247\7P\2\2\u0247"+ - "\u0248\7G\2\2\u0248\u0249\7T\2\2\u0249b\3\2\2\2\u024a\u024b\7K\2\2\u024b"+ - "\u024c\7P\2\2\u024c\u024d\7V\2\2\u024d\u024e\7G\2\2\u024e\u024f\7T\2\2"+ - "\u024f\u0250\7X\2\2\u0250\u0251\7C\2\2\u0251\u0252\7N\2\2\u0252d\3\2\2"+ - "\2\u0253\u0254\7K\2\2\u0254\u0255\7U\2\2\u0255f\3\2\2\2\u0256\u0257\7"+ - "L\2\2\u0257\u0258\7Q\2\2\u0258\u0259\7K\2\2\u0259\u025a\7P\2\2\u025ah"+ - "\3\2\2\2\u025b\u025c\7N\2\2\u025c\u025d\7C\2\2\u025d\u025e\7U\2\2\u025e"+ - "\u025f\7V\2\2\u025fj\3\2\2\2\u0260\u0261\7N\2\2\u0261\u0262\7G\2\2\u0262"+ - "\u0263\7H\2\2\u0263\u0264\7V\2\2\u0264l\3\2\2\2\u0265\u0266\7N\2\2\u0266"+ - "\u0267\7K\2\2\u0267\u0268\7M\2\2\u0268\u0269\7G\2\2\u0269n\3\2\2\2\u026a"+ - "\u026b\7N\2\2\u026b\u026c\7K\2\2\u026c\u026d\7O\2\2\u026d\u026e\7K\2\2"+ - "\u026e\u026f\7V\2\2\u026fp\3\2\2\2\u0270\u0271\7O\2\2\u0271\u0272\7C\2"+ - "\2\u0272\u0273\7R\2\2\u0273\u0274\7R\2\2\u0274\u0275\7G\2\2\u0275\u0276"+ - "\7F\2\2\u0276r\3\2\2\2\u0277\u0278\7O\2\2\u0278\u0279\7C\2\2\u0279\u027a"+ - "\7V\2\2\u027a\u027b\7E\2\2\u027b\u027c\7J\2\2\u027ct\3\2\2\2\u027d\u027e"+ - "\7O\2\2\u027e\u027f\7K\2\2\u027f\u0280\7P\2\2\u0280\u0281\7W\2\2\u0281"+ - 
"\u0282\7V\2\2\u0282\u0283\7G\2\2\u0283v\3\2\2\2\u0284\u0285\7O\2\2\u0285"+ - "\u0286\7K\2\2\u0286\u0287\7P\2\2\u0287\u0288\7W\2\2\u0288\u0289\7V\2\2"+ - "\u0289\u028a\7G\2\2\u028a\u028b\7U\2\2\u028bx\3\2\2\2\u028c\u028d\7O\2"+ - "\2\u028d\u028e\7Q\2\2\u028e\u028f\7P\2\2\u028f\u0290\7V\2\2\u0290\u0291"+ - "\7J\2\2\u0291z\3\2\2\2\u0292\u0293\7O\2\2\u0293\u0294\7Q\2\2\u0294\u0295"+ - "\7P\2\2\u0295\u0296\7V\2\2\u0296\u0297\7J\2\2\u0297\u0298\7U\2\2\u0298"+ - "|\3\2\2\2\u0299\u029a\7P\2\2\u029a\u029b\7C\2\2\u029b\u029c\7V\2\2\u029c"+ - "\u029d\7W\2\2\u029d\u029e\7T\2\2\u029e\u029f\7C\2\2\u029f\u02a0\7N\2\2"+ - "\u02a0~\3\2\2\2\u02a1\u02a2\7P\2\2\u02a2\u02a3\7Q\2\2\u02a3\u02a4\7V\2"+ - "\2\u02a4\u0080\3\2\2\2\u02a5\u02a6\7P\2\2\u02a6\u02a7\7W\2\2\u02a7\u02a8"+ - "\7N\2\2\u02a8\u02a9\7N\2\2\u02a9\u0082\3\2\2\2\u02aa\u02ab\7P\2\2\u02ab"+ - "\u02ac\7W\2\2\u02ac\u02ad\7N\2\2\u02ad\u02ae\7N\2\2\u02ae\u02af\7U\2\2"+ - "\u02af\u0084\3\2\2\2\u02b0\u02b1\7Q\2\2\u02b1\u02b2\7P\2\2\u02b2\u0086"+ - "\3\2\2\2\u02b3\u02b4\7Q\2\2\u02b4\u02b5\7R\2\2\u02b5\u02b6\7V\2\2\u02b6"+ - "\u02b7\7K\2\2\u02b7\u02b8\7O\2\2\u02b8\u02b9\7K\2\2\u02b9\u02ba\7\\\2"+ - "\2\u02ba\u02bb\7G\2\2\u02bb\u02bc\7F\2\2\u02bc\u0088\3\2\2\2\u02bd\u02be"+ - "\7Q\2\2\u02be\u02bf\7T\2\2\u02bf\u008a\3\2\2\2\u02c0\u02c1\7Q\2\2\u02c1"+ - "\u02c2\7T\2\2\u02c2\u02c3\7F\2\2\u02c3\u02c4\7G\2\2\u02c4\u02c5\7T\2\2"+ - "\u02c5\u008c\3\2\2\2\u02c6\u02c7\7Q\2\2\u02c7\u02c8\7W\2\2\u02c8\u02c9"+ - "\7V\2\2\u02c9\u02ca\7G\2\2\u02ca\u02cb\7T\2\2\u02cb\u008e\3\2\2\2\u02cc"+ - "\u02cd\7R\2\2\u02cd\u02ce\7C\2\2\u02ce\u02cf\7T\2\2\u02cf\u02d0\7U\2\2"+ - "\u02d0\u02d1\7G\2\2\u02d1\u02d2\7F\2\2\u02d2\u0090\3\2\2\2\u02d3\u02d4"+ - "\7R\2\2\u02d4\u02d5\7J\2\2\u02d5\u02d6\7[\2\2\u02d6\u02d7\7U\2\2\u02d7"+ - "\u02d8\7K\2\2\u02d8\u02d9\7E\2\2\u02d9\u02da\7C\2\2\u02da\u02db\7N\2\2"+ - "\u02db\u0092\3\2\2\2\u02dc\u02dd\7R\2\2\u02dd\u02de\7N\2\2\u02de\u02df"+ - "\7C\2\2\u02df\u02e0\7P\2\2\u02e0\u0094\3\2\2\2\u02e1\u02e2\7T\2\2\u02e2"+ - "\u02e3\7K\2\2\u02e3\u02e4\7I\2\2\u02e4\u02e5\7J\2\2\u02e5\u02e6\7V\2\2"+ - "\u02e6\u0096\3\2\2\2\u02e7\u02e8\7T\2\2\u02e8\u02e9\7N\2\2\u02e9\u02ea"+ - "\7K\2\2\u02ea\u02eb\7M\2\2\u02eb\u02ec\7G\2\2\u02ec\u0098\3\2\2\2\u02ed"+ - "\u02ee\7S\2\2\u02ee\u02ef\7W\2\2\u02ef\u02f0\7G\2\2\u02f0\u02f1\7T\2\2"+ - "\u02f1\u02f2\7[\2\2\u02f2\u009a\3\2\2\2\u02f3\u02f4\7U\2\2\u02f4\u02f5"+ - "\7E\2\2\u02f5\u02f6\7J\2\2\u02f6\u02f7\7G\2\2\u02f7\u02f8\7O\2\2\u02f8"+ - "\u02f9\7C\2\2\u02f9\u02fa\7U\2\2\u02fa\u009c\3\2\2\2\u02fb\u02fc\7U\2"+ - "\2\u02fc\u02fd\7G\2\2\u02fd\u02fe\7E\2\2\u02fe\u02ff\7Q\2\2\u02ff\u0300"+ - "\7P\2\2\u0300\u0301\7F\2\2\u0301\u009e\3\2\2\2\u0302\u0303\7U\2\2\u0303"+ - "\u0304\7G\2\2\u0304\u0305\7E\2\2\u0305\u0306\7Q\2\2\u0306\u0307\7P\2\2"+ - "\u0307\u0308\7F\2\2\u0308\u0309\7U\2\2\u0309\u00a0\3\2\2\2\u030a\u030b"+ - "\7U\2\2\u030b\u030c\7G\2\2\u030c\u030d\7N\2\2\u030d\u030e\7G\2\2\u030e"+ - "\u030f\7E\2\2\u030f\u0310\7V\2\2\u0310\u00a2\3\2\2\2\u0311\u0312\7U\2"+ - "\2\u0312\u0313\7J\2\2\u0313\u0314\7Q\2\2\u0314\u0315\7Y\2\2\u0315\u00a4"+ - "\3\2\2\2\u0316\u0317\7U\2\2\u0317\u0318\7[\2\2\u0318\u0319\7U\2\2\u0319"+ - "\u00a6\3\2\2\2\u031a\u031b\7V\2\2\u031b\u031c\7C\2\2\u031c\u031d\7D\2"+ - "\2\u031d\u031e\7N\2\2\u031e\u031f\7G\2\2\u031f\u00a8\3\2\2\2\u0320\u0321"+ - "\7V\2\2\u0321\u0322\7C\2\2\u0322\u0323\7D\2\2\u0323\u0324\7N\2\2\u0324"+ - "\u0325\7G\2\2\u0325\u0326\7U\2\2\u0326\u00aa\3\2\2\2\u0327\u0328\7V\2"+ - "\2\u0328\u0329\7G\2\2\u0329\u032a\7Z\2\2\u032a\u032b\7V\2\2\u032b\u00ac"+ - 
"\3\2\2\2\u032c\u032d\7V\2\2\u032d\u032e\7J\2\2\u032e\u032f\7G\2\2\u032f"+ - "\u0330\7P\2\2\u0330\u00ae\3\2\2\2\u0331\u0332\7V\2\2\u0332\u0333\7T\2"+ - "\2\u0333\u0334\7W\2\2\u0334\u0335\7G\2\2\u0335\u00b0\3\2\2\2\u0336\u0337"+ - "\7V\2\2\u0337\u0338\7Q\2\2\u0338\u00b2\3\2\2\2\u0339\u033a\7V\2\2\u033a"+ - "\u033b\7[\2\2\u033b\u033c\7R\2\2\u033c\u033d\7G\2\2\u033d\u00b4\3\2\2"+ - "\2\u033e\u033f\7V\2\2\u033f\u0340\7[\2\2\u0340\u0341\7R\2\2\u0341\u0342"+ - "\7G\2\2\u0342\u0343\7U\2\2\u0343\u00b6\3\2\2\2\u0344\u0345\7W\2\2\u0345"+ - "\u0346\7U\2\2\u0346\u0347\7K\2\2\u0347\u0348\7P\2\2\u0348\u0349\7I\2\2"+ - "\u0349\u00b8\3\2\2\2\u034a\u034b\7X\2\2\u034b\u034c\7G\2\2\u034c\u034d"+ - "\7T\2\2\u034d\u034e\7K\2\2\u034e\u034f\7H\2\2\u034f\u0350\7[\2\2\u0350"+ - "\u00ba\3\2\2\2\u0351\u0352\7Y\2\2\u0352\u0353\7J\2\2\u0353\u0354\7G\2"+ - "\2\u0354\u0355\7P\2\2\u0355\u00bc\3\2\2\2\u0356\u0357\7Y\2\2\u0357\u0358"+ - "\7J\2\2\u0358\u0359\7G\2\2\u0359\u035a\7T\2\2\u035a\u035b\7G\2\2\u035b"+ - "\u00be\3\2\2\2\u035c\u035d\7Y\2\2\u035d\u035e\7K\2\2\u035e\u035f\7V\2"+ - "\2\u035f\u0360\7J\2\2\u0360\u00c0\3\2\2\2\u0361\u0362\7[\2\2\u0362\u0363"+ - "\7G\2\2\u0363\u0364\7C\2\2\u0364\u0365\7T\2\2\u0365\u00c2\3\2\2\2\u0366"+ - "\u0367\7[\2\2\u0367\u0368\7G\2\2\u0368\u0369\7C\2\2\u0369\u036a\7T\2\2"+ - "\u036a\u036b\7U\2\2\u036b\u00c4\3\2\2\2\u036c\u036d\7}\2\2\u036d\u036e"+ - "\7G\2\2\u036e\u036f\7U\2\2\u036f\u0370\7E\2\2\u0370\u0371\7C\2\2\u0371"+ - "\u0372\7R\2\2\u0372\u0373\7G\2\2\u0373\u00c6\3\2\2\2\u0374\u0375\7}\2"+ - "\2\u0375\u0376\7H\2\2\u0376\u0377\7P\2\2\u0377\u00c8\3\2\2\2\u0378\u0379"+ - "\7}\2\2\u0379\u037a\7N\2\2\u037a\u037b\7K\2\2\u037b\u037c\7O\2\2\u037c"+ - "\u037d\7K\2\2\u037d\u037e\7V\2\2\u037e\u00ca\3\2\2\2\u037f\u0380\7}\2"+ - "\2\u0380\u0381\7F\2\2\u0381\u00cc\3\2\2\2\u0382\u0383\7}\2\2\u0383\u0384"+ - "\7V\2\2\u0384\u00ce\3\2\2\2\u0385\u0386\7}\2\2\u0386\u0387\7V\2\2\u0387"+ - "\u0388\7U\2\2\u0388\u00d0\3\2\2\2\u0389\u038a\7}\2\2\u038a\u038b\7I\2"+ - "\2\u038b\u038c\7W\2\2\u038c\u038d\7K\2\2\u038d\u038e\7F\2\2\u038e\u00d2"+ - "\3\2\2\2\u038f\u0390\7\177\2\2\u0390\u00d4\3\2\2\2\u0391\u0392\7?\2\2"+ - "\u0392\u00d6\3\2\2\2\u0393\u0394\7>\2\2\u0394\u0395\7?\2\2\u0395\u0396"+ - "\7@\2\2\u0396\u00d8\3\2\2\2\u0397\u0398\7>\2\2\u0398\u039c\7@\2\2\u0399"+ - "\u039a\7#\2\2\u039a\u039c\7?\2\2\u039b\u0397\3\2\2\2\u039b\u0399\3\2\2"+ - "\2\u039c\u00da\3\2\2\2\u039d\u039e\7>\2\2\u039e\u00dc\3\2\2\2\u039f\u03a0"+ - "\7>\2\2\u03a0\u03a1\7?\2\2\u03a1\u00de\3\2\2\2\u03a2\u03a3\7@\2\2\u03a3"+ - "\u00e0\3\2\2\2\u03a4\u03a5\7@\2\2\u03a5\u03a6\7?\2\2\u03a6\u00e2\3\2\2"+ - "\2\u03a7\u03a8\7-\2\2\u03a8\u00e4\3\2\2\2\u03a9\u03aa\7/\2\2\u03aa\u00e6"+ - "\3\2\2\2\u03ab\u03ac\7,\2\2\u03ac\u00e8\3\2\2\2\u03ad\u03ae\7\61\2\2\u03ae"+ - "\u00ea\3\2\2\2\u03af\u03b0\7\'\2\2\u03b0\u00ec\3\2\2\2\u03b1\u03b2\7<"+ - "\2\2\u03b2\u03b3\7<\2\2\u03b3\u00ee\3\2\2\2\u03b4\u03b5\7~\2\2\u03b5\u03b6"+ - "\7~\2\2\u03b6\u00f0\3\2\2\2\u03b7\u03b8\7\60\2\2\u03b8\u00f2\3\2\2\2\u03b9"+ - "\u03ba\7A\2\2\u03ba\u00f4\3\2\2\2\u03bb\u03c1\7)\2\2\u03bc\u03c0\n\2\2"+ - "\2\u03bd\u03be\7)\2\2\u03be\u03c0\7)\2\2\u03bf\u03bc\3\2\2\2\u03bf\u03bd"+ - "\3\2\2\2\u03c0\u03c3\3\2\2\2\u03c1\u03bf\3\2\2\2\u03c1\u03c2\3\2\2\2\u03c2"+ - "\u03c4\3\2\2\2\u03c3\u03c1\3\2\2\2\u03c4\u03c5\7)\2\2\u03c5\u00f6\3\2"+ - "\2\2\u03c6\u03c8\5\u0107\u0084\2\u03c7\u03c6\3\2\2\2\u03c8\u03c9\3\2\2"+ - "\2\u03c9\u03c7\3\2\2\2\u03c9\u03ca\3\2\2\2\u03ca\u00f8\3\2\2\2\u03cb\u03cd"+ - "\5\u0107\u0084\2\u03cc\u03cb\3\2\2\2\u03cd\u03ce\3\2\2\2\u03ce\u03cc\3"+ - 
"\2\2\2\u03ce\u03cf\3\2\2\2\u03cf\u03d0\3\2\2\2\u03d0\u03d4\5\u00f1y\2"+ - "\u03d1\u03d3\5\u0107\u0084\2\u03d2\u03d1\3\2\2\2\u03d3\u03d6\3\2\2\2\u03d4"+ - "\u03d2\3\2\2\2\u03d4\u03d5\3\2\2\2\u03d5\u03f6\3\2\2\2\u03d6\u03d4\3\2"+ - "\2\2\u03d7\u03d9\5\u00f1y\2\u03d8\u03da\5\u0107\u0084\2\u03d9\u03d8\3"+ - "\2\2\2\u03da\u03db\3\2\2\2\u03db\u03d9\3\2\2\2\u03db\u03dc\3\2\2\2\u03dc"+ - "\u03f6\3\2\2\2\u03dd\u03df\5\u0107\u0084\2\u03de\u03dd\3\2\2\2\u03df\u03e0"+ - "\3\2\2\2\u03e0\u03de\3\2\2\2\u03e0\u03e1\3\2\2\2\u03e1\u03e9\3\2\2\2\u03e2"+ - "\u03e6\5\u00f1y\2\u03e3\u03e5\5\u0107\u0084\2\u03e4\u03e3\3\2\2\2\u03e5"+ - "\u03e8\3\2\2\2\u03e6\u03e4\3\2\2\2\u03e6\u03e7\3\2\2\2\u03e7\u03ea\3\2"+ - "\2\2\u03e8\u03e6\3\2\2\2\u03e9\u03e2\3\2\2\2\u03e9\u03ea\3\2\2\2\u03ea"+ - "\u03eb\3\2\2\2\u03eb\u03ec\5\u0105\u0083\2\u03ec\u03f6\3\2\2\2\u03ed\u03ef"+ - "\5\u00f1y\2\u03ee\u03f0\5\u0107\u0084\2\u03ef\u03ee\3\2\2\2\u03f0\u03f1"+ - "\3\2\2\2\u03f1\u03ef\3\2\2\2\u03f1\u03f2\3\2\2\2\u03f2\u03f3\3\2\2\2\u03f3"+ - "\u03f4\5\u0105\u0083\2\u03f4\u03f6\3\2\2\2\u03f5\u03cc\3\2\2\2\u03f5\u03d7"+ - "\3\2\2\2\u03f5\u03de\3\2\2\2\u03f5\u03ed\3\2\2\2\u03f6\u00fa\3\2\2\2\u03f7"+ - "\u03fa\5\u0109\u0085\2\u03f8\u03fa\7a\2\2\u03f9\u03f7\3\2\2\2\u03f9\u03f8"+ - "\3\2\2\2\u03fa\u0400\3\2\2\2\u03fb\u03ff\5\u0109\u0085\2\u03fc\u03ff\5"+ - "\u0107\u0084\2\u03fd\u03ff\t\3\2\2\u03fe\u03fb\3\2\2\2\u03fe\u03fc\3\2"+ - "\2\2\u03fe\u03fd\3\2\2\2\u03ff\u0402\3\2\2\2\u0400\u03fe\3\2\2\2\u0400"+ - "\u0401\3\2\2\2\u0401\u00fc\3\2\2\2\u0402\u0400\3\2\2\2\u0403\u0407\5\u0107"+ - "\u0084\2\u0404\u0408\5\u0109\u0085\2\u0405\u0408\5\u0107\u0084\2\u0406"+ - "\u0408\t\3\2\2\u0407\u0404\3\2\2\2\u0407\u0405\3\2\2\2\u0407\u0406\3\2"+ - "\2\2\u0408\u0409\3\2\2\2\u0409\u0407\3\2\2\2\u0409\u040a\3\2\2\2\u040a"+ - "\u00fe\3\2\2\2\u040b\u040f\5\u0109\u0085\2\u040c\u040f\5\u0107\u0084\2"+ - "\u040d\u040f\7a\2\2\u040e\u040b\3\2\2\2\u040e\u040c\3\2\2\2\u040e\u040d"+ - "\3\2\2\2\u040f\u0410\3\2\2\2\u0410\u040e\3\2\2\2\u0410\u0411\3\2\2\2\u0411"+ - "\u0100\3\2\2\2\u0412\u0418\7$\2\2\u0413\u0417\n\4\2\2\u0414\u0415\7$\2"+ - "\2\u0415\u0417\7$\2\2\u0416\u0413\3\2\2\2\u0416\u0414\3\2\2\2\u0417\u041a"+ - "\3\2\2\2\u0418\u0416\3\2\2\2\u0418\u0419\3\2\2\2\u0419\u041b\3\2\2\2\u041a"+ - "\u0418\3\2\2\2\u041b\u041c\7$\2\2\u041c\u0102\3\2\2\2\u041d\u0423\7b\2"+ - "\2\u041e\u0422\n\5\2\2\u041f\u0420\7b\2\2\u0420\u0422\7b\2\2\u0421\u041e"+ - "\3\2\2\2\u0421\u041f\3\2\2\2\u0422\u0425\3\2\2\2\u0423\u0421\3\2\2\2\u0423"+ - "\u0424\3\2\2\2\u0424\u0426\3\2\2\2\u0425\u0423\3\2\2\2\u0426\u0427\7b"+ - "\2\2\u0427\u0104\3\2\2\2\u0428\u042a\7G\2\2\u0429\u042b\t\6\2\2\u042a"+ - "\u0429\3\2\2\2\u042a\u042b\3\2\2\2\u042b\u042d\3\2\2\2\u042c\u042e\5\u0107"+ - "\u0084\2\u042d\u042c\3\2\2\2\u042e\u042f\3\2\2\2\u042f\u042d\3\2\2\2\u042f"+ - "\u0430\3\2\2\2\u0430\u0106\3\2\2\2\u0431\u0432\t\7\2\2\u0432\u0108\3\2"+ - "\2\2\u0433\u0434\t\b\2\2\u0434\u010a\3\2\2\2\u0435\u0436\7/\2\2\u0436"+ - "\u0437\7/\2\2\u0437\u043b\3\2\2\2\u0438\u043a\n\t\2\2\u0439\u0438\3\2"+ - "\2\2\u043a\u043d\3\2\2\2\u043b\u0439\3\2\2\2\u043b\u043c\3\2\2\2\u043c"+ - "\u043f\3\2\2\2\u043d\u043b\3\2\2\2\u043e\u0440\7\17\2\2\u043f\u043e\3"+ - "\2\2\2\u043f\u0440\3\2\2\2\u0440\u0442\3\2\2\2\u0441\u0443\7\f\2\2\u0442"+ - "\u0441\3\2\2\2\u0442\u0443\3\2\2\2\u0443\u0444\3\2\2\2\u0444\u0445\b\u0086"+ - "\2\2\u0445\u010c\3\2\2\2\u0446\u0447\7\61\2\2\u0447\u0448\7,\2\2\u0448"+ - "\u044d\3\2\2\2\u0449\u044c\5\u010d\u0087\2\u044a\u044c\13\2\2\2\u044b"+ - 
"\u0449\3\2\2\2\u044b\u044a\3\2\2\2\u044c\u044f\3\2\2\2\u044d\u044e\3\2"+ - "\2\2\u044d\u044b\3\2\2\2\u044e\u0450\3\2\2\2\u044f\u044d\3\2\2\2\u0450"+ - "\u0451\7,\2\2\u0451\u0452\7\61\2\2\u0452\u0453\3\2\2\2\u0453\u0454\b\u0087"+ - "\2\2\u0454\u010e\3\2\2\2\u0455\u0457\t\n\2\2\u0456\u0455\3\2\2\2\u0457"+ - "\u0458\3\2\2\2\u0458\u0456\3\2\2\2\u0458\u0459\3\2\2\2\u0459\u045a\3\2"+ - "\2\2\u045a\u045b\b\u0088\2\2\u045b\u0110\3\2\2\2\u045c\u045d\13\2\2\2"+ - "\u045d\u0112\3\2\2\2\"\2\u039b\u03bf\u03c1\u03c9\u03ce\u03d4\u03db\u03e0"+ - "\u03e6\u03e9\u03f1\u03f5\u03f9\u03fe\u0400\u0407\u0409\u040e\u0410\u0416"+ - "\u0418\u0421\u0423\u042a\u042f\u043b\u043f\u0442\u044b\u044d\u0458\3\2"+ - "\3\2"; + "\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3"+ + "\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b"+ + "\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3"+ + "\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3"+ + "\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3"+ + "\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3"+ + "\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3"+ + "\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3"+ + "\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3"+ + "\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3"+ + "\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3"+ + "\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3"+ + "\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3"+ + "\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3"+ + "!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3"+ + "$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3"+ + "\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3+\3"+ + "+\3+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3"+ + "-\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\60\3"+ + "\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63\3"+ + "\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3"+ + "\65\3\66\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3\67\3\67\38\38\38\38\38\3"+ + "9\39\39\39\39\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3"+ + "<\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3@\3"+ + "@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3C\3C\3C\3C\3C\3"+ + "D\3D\3D\3D\3D\3D\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3H\3"+ + "H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3K\3"+ + "K\3K\3K\3K\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3N\3O\3O\3"+ + "O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3"+ + "R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3T\3T\3T\3T\3T\3U\3U\3U\3U\3V\3V\3V\3"+ + "V\3V\3V\3W\3W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3"+ + "Z\3Z\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3^\3^"+ + "\3_\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b"+ + "\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f"+ + "\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3i\3i\3i\3j\3j\3j\3j\3k\3k\3k\3k\3k\3k"+ + "\3l\3l\3m\3m\3n\3n\3n\3n\3o\3o\3o\3o\5o\u03af\no\3p\3p\3q\3q\3q\3r\3r"+ + "\3s\3s\3s\3t\3t\3u\3u\3v\3v\3w\3w\3x\3x\3y\3y\3y\3z\3z\3z\3{\3{\3|\3|"+ + 
"\3}\3}\3}\3}\7}\u03d3\n}\f}\16}\u03d6\13}\3}\3}\3~\6~\u03db\n~\r~\16~"+ + "\u03dc\3\177\6\177\u03e0\n\177\r\177\16\177\u03e1\3\177\3\177\7\177\u03e6"+ + "\n\177\f\177\16\177\u03e9\13\177\3\177\3\177\6\177\u03ed\n\177\r\177\16"+ + "\177\u03ee\3\177\6\177\u03f2\n\177\r\177\16\177\u03f3\3\177\3\177\7\177"+ + "\u03f8\n\177\f\177\16\177\u03fb\13\177\5\177\u03fd\n\177\3\177\3\177\3"+ + "\177\3\177\6\177\u0403\n\177\r\177\16\177\u0404\3\177\3\177\5\177\u0409"+ + "\n\177\3\u0080\3\u0080\5\u0080\u040d\n\u0080\3\u0080\3\u0080\3\u0080\7"+ + "\u0080\u0412\n\u0080\f\u0080\16\u0080\u0415\13\u0080\3\u0081\3\u0081\3"+ + "\u0081\3\u0081\6\u0081\u041b\n\u0081\r\u0081\16\u0081\u041c\3\u0082\3"+ + "\u0082\3\u0082\6\u0082\u0422\n\u0082\r\u0082\16\u0082\u0423\3\u0083\3"+ + "\u0083\3\u0083\3\u0083\7\u0083\u042a\n\u0083\f\u0083\16\u0083\u042d\13"+ + "\u0083\3\u0083\3\u0083\3\u0084\3\u0084\3\u0084\3\u0084\7\u0084\u0435\n"+ + "\u0084\f\u0084\16\u0084\u0438\13\u0084\3\u0084\3\u0084\3\u0085\3\u0085"+ + "\5\u0085\u043e\n\u0085\3\u0085\6\u0085\u0441\n\u0085\r\u0085\16\u0085"+ + "\u0442\3\u0086\3\u0086\3\u0087\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088"+ + "\7\u0088\u044d\n\u0088\f\u0088\16\u0088\u0450\13\u0088\3\u0088\5\u0088"+ + "\u0453\n\u0088\3\u0088\5\u0088\u0456\n\u0088\3\u0088\3\u0088\3\u0089\3"+ + "\u0089\3\u0089\3\u0089\3\u0089\7\u0089\u045f\n\u0089\f\u0089\16\u0089"+ + "\u0462\13\u0089\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089\3\u008a\6\u008a"+ + "\u046a\n\u008a\r\u008a\16\u008a\u046b\3\u008a\3\u008a\3\u008b\3\u008b"+ + "\3\u0460\2\u008c\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31"+ + "\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65"+ + "\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64"+ + "g\65i\66k\67m8o9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089"+ + "F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d"+ + "P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1"+ + "Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5"+ + "d\u00c7e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9"+ + "n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00ed"+ + "x\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff"+ + "\u0081\u0101\u0082\u0103\u0083\u0105\u0084\u0107\u0085\u0109\2\u010b\2"+ + "\u010d\2\u010f\u0086\u0111\u0087\u0113\u0088\u0115\u0089\3\2\13\3\2))"+ + "\4\2BBaa\3\2$$\3\2bb\4\2--//\3\2\62;\3\2C\\\4\2\f\f\17\17\5\2\13\f\17"+ + "\17\"\"\u0491\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2"+ + "\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2"+ + "\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2"+ + "\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2"+ + "\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3"+ + "\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2"+ + "\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2"+ + "S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3"+ + "\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2"+ + "\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2"+ + "y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083"+ + "\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2"+ + "\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095"+ + 
"\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2"+ + "\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7"+ + "\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2"+ + "\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9"+ + "\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2"+ + "\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb"+ + "\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2"+ + "\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd"+ + "\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2"+ + "\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef"+ + "\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2"+ + "\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2\2\2\u0101"+ + "\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u010f\3\2\2"+ + "\2\2\u0111\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\3\u0117\3\2\2\2\5\u0119"+ + "\3\2\2\2\7\u011b\3\2\2\2\t\u011d\3\2\2\2\13\u011f\3\2\2\2\r\u0123\3\2"+ + "\2\2\17\u012b\3\2\2\2\21\u0134\3\2\2\2\23\u0138\3\2\2\2\25\u013c\3\2\2"+ + "\2\27\u013f\3\2\2\2\31\u0143\3\2\2\2\33\u014b\3\2\2\2\35\u014e\3\2\2\2"+ + "\37\u0153\3\2\2\2!\u0158\3\2\2\2#\u0160\3\2\2\2%\u0169\3\2\2\2\'\u0171"+ + "\3\2\2\2)\u0179\3\2\2\2+\u0186\3\2\2\2-\u0193\3\2\2\2/\u01a5\3\2\2\2\61"+ + "\u01a9\3\2\2\2\63\u01ae\3\2\2\2\65\u01b4\3\2\2\2\67\u01b9\3\2\2\29\u01c2"+ + "\3\2\2\2;\u01cb\3\2\2\2=\u01d0\3\2\2\2?\u01d4\3\2\2\2A\u01db\3\2\2\2C"+ + "\u01e6\3\2\2\2E\u01ed\3\2\2\2G\u01f5\3\2\2\2I\u01fd\3\2\2\2K\u0203\3\2"+ + "\2\2M\u0209\3\2\2\2O\u0210\3\2\2\2Q\u0215\3\2\2\2S\u021c\3\2\2\2U\u0221"+ + "\3\2\2\2W\u022b\3\2\2\2Y\u0234\3\2\2\2[\u023a\3\2\2\2]\u0241\3\2\2\2_"+ + "\u0246\3\2\2\2a\u024c\3\2\2\2c\u024f\3\2\2\2e\u0257\3\2\2\2g\u025d\3\2"+ + "\2\2i\u0266\3\2\2\2k\u0269\3\2\2\2m\u026e\3\2\2\2o\u0273\3\2\2\2q\u0278"+ + "\3\2\2\2s\u027d\3\2\2\2u\u0283\3\2\2\2w\u028a\3\2\2\2y\u0290\3\2\2\2{"+ + "\u0297\3\2\2\2}\u029f\3\2\2\2\177\u02a5\3\2\2\2\u0081\u02ac\3\2\2\2\u0083"+ + "\u02b4\3\2\2\2\u0085\u02b8\3\2\2\2\u0087\u02bd\3\2\2\2\u0089\u02c3\3\2"+ + "\2\2\u008b\u02c6\3\2\2\2\u008d\u02d0\3\2\2\2\u008f\u02d3\3\2\2\2\u0091"+ + "\u02d9\3\2\2\2\u0093\u02df\3\2\2\2\u0095\u02e6\3\2\2\2\u0097\u02ef\3\2"+ + "\2\2\u0099\u02f4\3\2\2\2\u009b\u02fa\3\2\2\2\u009d\u0300\3\2\2\2\u009f"+ + "\u0306\3\2\2\2\u00a1\u030e\3\2\2\2\u00a3\u0315\3\2\2\2\u00a5\u031d\3\2"+ + "\2\2\u00a7\u0324\3\2\2\2\u00a9\u0329\3\2\2\2\u00ab\u032d\3\2\2\2\u00ad"+ + "\u0333\3\2\2\2\u00af\u033a\3\2\2\2\u00b1\u033f\3\2\2\2\u00b3\u0344\3\2"+ + "\2\2\u00b5\u0349\3\2\2\2\u00b7\u034c\3\2\2\2\u00b9\u0351\3\2\2\2\u00bb"+ + "\u0357\3\2\2\2\u00bd\u035d\3\2\2\2\u00bf\u0364\3\2\2\2\u00c1\u0369\3\2"+ + "\2\2\u00c3\u036f\3\2\2\2\u00c5\u0374\3\2\2\2\u00c7\u0379\3\2\2\2\u00c9"+ + "\u037f\3\2\2\2\u00cb\u0387\3\2\2\2\u00cd\u038b\3\2\2\2\u00cf\u0392\3\2"+ + "\2\2\u00d1\u0395\3\2\2\2\u00d3\u0398\3\2\2\2\u00d5\u039c\3\2\2\2\u00d7"+ + "\u03a2\3\2\2\2\u00d9\u03a4\3\2\2\2\u00db\u03a6\3\2\2\2\u00dd\u03ae\3\2"+ + "\2\2\u00df\u03b0\3\2\2\2\u00e1\u03b2\3\2\2\2\u00e3\u03b5\3\2\2\2\u00e5"+ + "\u03b7\3\2\2\2\u00e7\u03ba\3\2\2\2\u00e9\u03bc\3\2\2\2\u00eb\u03be\3\2"+ + "\2\2\u00ed\u03c0\3\2\2\2\u00ef\u03c2\3\2\2\2\u00f1\u03c4\3\2\2\2\u00f3"+ + "\u03c7\3\2\2\2\u00f5\u03ca\3\2\2\2\u00f7\u03cc\3\2\2\2\u00f9\u03ce\3\2"+ + "\2\2\u00fb\u03da\3\2\2\2\u00fd\u0408\3\2\2\2\u00ff\u040c\3\2\2\2\u0101"+ + 
"\u0416\3\2\2\2\u0103\u0421\3\2\2\2\u0105\u0425\3\2\2\2\u0107\u0430\3\2"+ + "\2\2\u0109\u043b\3\2\2\2\u010b\u0444\3\2\2\2\u010d\u0446\3\2\2\2\u010f"+ + "\u0448\3\2\2\2\u0111\u0459\3\2\2\2\u0113\u0469\3\2\2\2\u0115\u046f\3\2"+ + "\2\2\u0117\u0118\7*\2\2\u0118\4\3\2\2\2\u0119\u011a\7+\2\2\u011a\6\3\2"+ + "\2\2\u011b\u011c\7.\2\2\u011c\b\3\2\2\2\u011d\u011e\7<\2\2\u011e\n\3\2"+ + "\2\2\u011f\u0120\7C\2\2\u0120\u0121\7N\2\2\u0121\u0122\7N\2\2\u0122\f"+ + "\3\2\2\2\u0123\u0124\7C\2\2\u0124\u0125\7P\2\2\u0125\u0126\7C\2\2\u0126"+ + "\u0127\7N\2\2\u0127\u0128\7[\2\2\u0128\u0129\7\\\2\2\u0129\u012a\7G\2"+ + "\2\u012a\16\3\2\2\2\u012b\u012c\7C\2\2\u012c\u012d\7P\2\2\u012d\u012e"+ + "\7C\2\2\u012e\u012f\7N\2\2\u012f\u0130\7[\2\2\u0130\u0131\7\\\2\2\u0131"+ + "\u0132\7G\2\2\u0132\u0133\7F\2\2\u0133\20\3\2\2\2\u0134\u0135\7C\2\2\u0135"+ + "\u0136\7P\2\2\u0136\u0137\7F\2\2\u0137\22\3\2\2\2\u0138\u0139\7C\2\2\u0139"+ + "\u013a\7P\2\2\u013a\u013b\7[\2\2\u013b\24\3\2\2\2\u013c\u013d\7C\2\2\u013d"+ + "\u013e\7U\2\2\u013e\26\3\2\2\2\u013f\u0140\7C\2\2\u0140\u0141\7U\2\2\u0141"+ + "\u0142\7E\2\2\u0142\30\3\2\2\2\u0143\u0144\7D\2\2\u0144\u0145\7G\2\2\u0145"+ + "\u0146\7V\2\2\u0146\u0147\7Y\2\2\u0147\u0148\7G\2\2\u0148\u0149\7G\2\2"+ + "\u0149\u014a\7P\2\2\u014a\32\3\2\2\2\u014b\u014c\7D\2\2\u014c\u014d\7"+ + "[\2\2\u014d\34\3\2\2\2\u014e\u014f\7E\2\2\u014f\u0150\7C\2\2\u0150\u0151"+ + "\7U\2\2\u0151\u0152\7G\2\2\u0152\36\3\2\2\2\u0153\u0154\7E\2\2\u0154\u0155"+ + "\7C\2\2\u0155\u0156\7U\2\2\u0156\u0157\7V\2\2\u0157 \3\2\2\2\u0158\u0159"+ + "\7E\2\2\u0159\u015a\7C\2\2\u015a\u015b\7V\2\2\u015b\u015c\7C\2\2\u015c"+ + "\u015d\7N\2\2\u015d\u015e\7Q\2\2\u015e\u015f\7I\2\2\u015f\"\3\2\2\2\u0160"+ + "\u0161\7E\2\2\u0161\u0162\7C\2\2\u0162\u0163\7V\2\2\u0163\u0164\7C\2\2"+ + "\u0164\u0165\7N\2\2\u0165\u0166\7Q\2\2\u0166\u0167\7I\2\2\u0167\u0168"+ + "\7U\2\2\u0168$\3\2\2\2\u0169\u016a\7E\2\2\u016a\u016b\7Q\2\2\u016b\u016c"+ + "\7N\2\2\u016c\u016d\7W\2\2\u016d\u016e\7O\2\2\u016e\u016f\7P\2\2\u016f"+ + "\u0170\7U\2\2\u0170&\3\2\2\2\u0171\u0172\7E\2\2\u0172\u0173\7Q\2\2\u0173"+ + "\u0174\7P\2\2\u0174\u0175\7X\2\2\u0175\u0176\7G\2\2\u0176\u0177\7T\2\2"+ + "\u0177\u0178\7V\2\2\u0178(\3\2\2\2\u0179\u017a\7E\2\2\u017a\u017b\7W\2"+ + "\2\u017b\u017c\7T\2\2\u017c\u017d\7T\2\2\u017d\u017e\7G\2\2\u017e\u017f"+ + "\7P\2\2\u017f\u0180\7V\2\2\u0180\u0181\7a\2\2\u0181\u0182\7F\2\2\u0182"+ + "\u0183\7C\2\2\u0183\u0184\7V\2\2\u0184\u0185\7G\2\2\u0185*\3\2\2\2\u0186"+ + "\u0187\7E\2\2\u0187\u0188\7W\2\2\u0188\u0189\7T\2\2\u0189\u018a\7T\2\2"+ + "\u018a\u018b\7G\2\2\u018b\u018c\7P\2\2\u018c\u018d\7V\2\2\u018d\u018e"+ + "\7a\2\2\u018e\u018f\7V\2\2\u018f\u0190\7K\2\2\u0190\u0191\7O\2\2\u0191"+ + "\u0192\7G\2\2\u0192,\3\2\2\2\u0193\u0194\7E\2\2\u0194\u0195\7W\2\2\u0195"+ + "\u0196\7T\2\2\u0196\u0197\7T\2\2\u0197\u0198\7G\2\2\u0198\u0199\7P\2\2"+ + "\u0199\u019a\7V\2\2\u019a\u019b\7a\2\2\u019b\u019c\7V\2\2\u019c\u019d"+ + "\7K\2\2\u019d\u019e\7O\2\2\u019e\u019f\7G\2\2\u019f\u01a0\7U\2\2\u01a0"+ + "\u01a1\7V\2\2\u01a1\u01a2\7C\2\2\u01a2\u01a3\7O\2\2\u01a3\u01a4\7R\2\2"+ + "\u01a4.\3\2\2\2\u01a5\u01a6\7F\2\2\u01a6\u01a7\7C\2\2\u01a7\u01a8\7[\2"+ + "\2\u01a8\60\3\2\2\2\u01a9\u01aa\7F\2\2\u01aa\u01ab\7C\2\2\u01ab\u01ac"+ + "\7[\2\2\u01ac\u01ad\7U\2\2\u01ad\62\3\2\2\2\u01ae\u01af\7F\2\2\u01af\u01b0"+ + "\7G\2\2\u01b0\u01b1\7D\2\2\u01b1\u01b2\7W\2\2\u01b2\u01b3\7I\2\2\u01b3"+ + "\64\3\2\2\2\u01b4\u01b5\7F\2\2\u01b5\u01b6\7G\2\2\u01b6\u01b7\7U\2\2\u01b7"+ + "\u01b8\7E\2\2\u01b8\66\3\2\2\2\u01b9\u01ba\7F\2\2\u01ba\u01bb\7G\2\2\u01bb"+ + 
"\u01bc\7U\2\2\u01bc\u01bd\7E\2\2\u01bd\u01be\7T\2\2\u01be\u01bf\7K\2\2"+ + "\u01bf\u01c0\7D\2\2\u01c0\u01c1\7G\2\2\u01c18\3\2\2\2\u01c2\u01c3\7F\2"+ + "\2\u01c3\u01c4\7K\2\2\u01c4\u01c5\7U\2\2\u01c5\u01c6\7V\2\2\u01c6\u01c7"+ + "\7K\2\2\u01c7\u01c8\7P\2\2\u01c8\u01c9\7E\2\2\u01c9\u01ca\7V\2\2\u01ca"+ + ":\3\2\2\2\u01cb\u01cc\7G\2\2\u01cc\u01cd\7N\2\2\u01cd\u01ce\7U\2\2\u01ce"+ + "\u01cf\7G\2\2\u01cf<\3\2\2\2\u01d0\u01d1\7G\2\2\u01d1\u01d2\7P\2\2\u01d2"+ + "\u01d3\7F\2\2\u01d3>\3\2\2\2\u01d4\u01d5\7G\2\2\u01d5\u01d6\7U\2\2\u01d6"+ + "\u01d7\7E\2\2\u01d7\u01d8\7C\2\2\u01d8\u01d9\7R\2\2\u01d9\u01da\7G\2\2"+ + "\u01da@\3\2\2\2\u01db\u01dc\7G\2\2\u01dc\u01dd\7Z\2\2\u01dd\u01de\7G\2"+ + "\2\u01de\u01df\7E\2\2\u01df\u01e0\7W\2\2\u01e0\u01e1\7V\2\2\u01e1\u01e2"+ + "\7C\2\2\u01e2\u01e3\7D\2\2\u01e3\u01e4\7N\2\2\u01e4\u01e5\7G\2\2\u01e5"+ + "B\3\2\2\2\u01e6\u01e7\7G\2\2\u01e7\u01e8\7Z\2\2\u01e8\u01e9\7K\2\2\u01e9"+ + "\u01ea\7U\2\2\u01ea\u01eb\7V\2\2\u01eb\u01ec\7U\2\2\u01ecD\3\2\2\2\u01ed"+ + "\u01ee\7G\2\2\u01ee\u01ef\7Z\2\2\u01ef\u01f0\7R\2\2\u01f0\u01f1\7N\2\2"+ + "\u01f1\u01f2\7C\2\2\u01f2\u01f3\7K\2\2\u01f3\u01f4\7P\2\2\u01f4F\3\2\2"+ + "\2\u01f5\u01f6\7G\2\2\u01f6\u01f7\7Z\2\2\u01f7\u01f8\7V\2\2\u01f8\u01f9"+ + "\7T\2\2\u01f9\u01fa\7C\2\2\u01fa\u01fb\7E\2\2\u01fb\u01fc\7V\2\2\u01fc"+ + "H\3\2\2\2\u01fd\u01fe\7H\2\2\u01fe\u01ff\7C\2\2\u01ff\u0200\7N\2\2\u0200"+ + "\u0201\7U\2\2\u0201\u0202\7G\2\2\u0202J\3\2\2\2\u0203\u0204\7H\2\2\u0204"+ + "\u0205\7K\2\2\u0205\u0206\7T\2\2\u0206\u0207\7U\2\2\u0207\u0208\7V\2\2"+ + "\u0208L\3\2\2\2\u0209\u020a\7H\2\2\u020a\u020b\7Q\2\2\u020b\u020c\7T\2"+ + "\2\u020c\u020d\7O\2\2\u020d\u020e\7C\2\2\u020e\u020f\7V\2\2\u020fN\3\2"+ + "\2\2\u0210\u0211\7H\2\2\u0211\u0212\7T\2\2\u0212\u0213\7Q\2\2\u0213\u0214"+ + "\7O\2\2\u0214P\3\2\2\2\u0215\u0216\7H\2\2\u0216\u0217\7T\2\2\u0217\u0218"+ + "\7Q\2\2\u0218\u0219\7\\\2\2\u0219\u021a\7G\2\2\u021a\u021b\7P\2\2\u021b"+ + "R\3\2\2\2\u021c\u021d\7H\2\2\u021d\u021e\7W\2\2\u021e\u021f\7N\2\2\u021f"+ + "\u0220\7N\2\2\u0220T\3\2\2\2\u0221\u0222\7H\2\2\u0222\u0223\7W\2\2\u0223"+ + "\u0224\7P\2\2\u0224\u0225\7E\2\2\u0225\u0226\7V\2\2\u0226\u0227\7K\2\2"+ + "\u0227\u0228\7Q\2\2\u0228\u0229\7P\2\2\u0229\u022a\7U\2\2\u022aV\3\2\2"+ + "\2\u022b\u022c\7I\2\2\u022c\u022d\7T\2\2\u022d\u022e\7C\2\2\u022e\u022f"+ + "\7R\2\2\u022f\u0230\7J\2\2\u0230\u0231\7X\2\2\u0231\u0232\7K\2\2\u0232"+ + "\u0233\7\\\2\2\u0233X\3\2\2\2\u0234\u0235\7I\2\2\u0235\u0236\7T\2\2\u0236"+ + "\u0237\7Q\2\2\u0237\u0238\7W\2\2\u0238\u0239\7R\2\2\u0239Z\3\2\2\2\u023a"+ + "\u023b\7J\2\2\u023b\u023c\7C\2\2\u023c\u023d\7X\2\2\u023d\u023e\7K\2\2"+ + "\u023e\u023f\7P\2\2\u023f\u0240\7I\2\2\u0240\\\3\2\2\2\u0241\u0242\7J"+ + "\2\2\u0242\u0243\7Q\2\2\u0243\u0244\7W\2\2\u0244\u0245\7T\2\2\u0245^\3"+ + "\2\2\2\u0246\u0247\7J\2\2\u0247\u0248\7Q\2\2\u0248\u0249\7W\2\2\u0249"+ + "\u024a\7T\2\2\u024a\u024b\7U\2\2\u024b`\3\2\2\2\u024c\u024d\7K\2\2\u024d"+ + "\u024e\7P\2\2\u024eb\3\2\2\2\u024f\u0250\7K\2\2\u0250\u0251\7P\2\2\u0251"+ + "\u0252\7E\2\2\u0252\u0253\7N\2\2\u0253\u0254\7W\2\2\u0254\u0255\7F\2\2"+ + "\u0255\u0256\7G\2\2\u0256d\3\2\2\2\u0257\u0258\7K\2\2\u0258\u0259\7P\2"+ + "\2\u0259\u025a\7P\2\2\u025a\u025b\7G\2\2\u025b\u025c\7T\2\2\u025cf\3\2"+ + "\2\2\u025d\u025e\7K\2\2\u025e\u025f\7P\2\2\u025f\u0260\7V\2\2\u0260\u0261"+ + "\7G\2\2\u0261\u0262\7T\2\2\u0262\u0263\7X\2\2\u0263\u0264\7C\2\2\u0264"+ + "\u0265\7N\2\2\u0265h\3\2\2\2\u0266\u0267\7K\2\2\u0267\u0268\7U\2\2\u0268"+ + "j\3\2\2\2\u0269\u026a\7L\2\2\u026a\u026b\7Q\2\2\u026b\u026c\7K\2\2\u026c"+ + 
"\u026d\7P\2\2\u026dl\3\2\2\2\u026e\u026f\7N\2\2\u026f\u0270\7C\2\2\u0270"+ + "\u0271\7U\2\2\u0271\u0272\7V\2\2\u0272n\3\2\2\2\u0273\u0274\7N\2\2\u0274"+ + "\u0275\7G\2\2\u0275\u0276\7H\2\2\u0276\u0277\7V\2\2\u0277p\3\2\2\2\u0278"+ + "\u0279\7N\2\2\u0279\u027a\7K\2\2\u027a\u027b\7M\2\2\u027b\u027c\7G\2\2"+ + "\u027cr\3\2\2\2\u027d\u027e\7N\2\2\u027e\u027f\7K\2\2\u027f\u0280\7O\2"+ + "\2\u0280\u0281\7K\2\2\u0281\u0282\7V\2\2\u0282t\3\2\2\2\u0283\u0284\7"+ + "O\2\2\u0284\u0285\7C\2\2\u0285\u0286\7R\2\2\u0286\u0287\7R\2\2\u0287\u0288"+ + "\7G\2\2\u0288\u0289\7F\2\2\u0289v\3\2\2\2\u028a\u028b\7O\2\2\u028b\u028c"+ + "\7C\2\2\u028c\u028d\7V\2\2\u028d\u028e\7E\2\2\u028e\u028f\7J\2\2\u028f"+ + "x\3\2\2\2\u0290\u0291\7O\2\2\u0291\u0292\7K\2\2\u0292\u0293\7P\2\2\u0293"+ + "\u0294\7W\2\2\u0294\u0295\7V\2\2\u0295\u0296\7G\2\2\u0296z\3\2\2\2\u0297"+ + "\u0298\7O\2\2\u0298\u0299\7K\2\2\u0299\u029a\7P\2\2\u029a\u029b\7W\2\2"+ + "\u029b\u029c\7V\2\2\u029c\u029d\7G\2\2\u029d\u029e\7U\2\2\u029e|\3\2\2"+ + "\2\u029f\u02a0\7O\2\2\u02a0\u02a1\7Q\2\2\u02a1\u02a2\7P\2\2\u02a2\u02a3"+ + "\7V\2\2\u02a3\u02a4\7J\2\2\u02a4~\3\2\2\2\u02a5\u02a6\7O\2\2\u02a6\u02a7"+ + "\7Q\2\2\u02a7\u02a8\7P\2\2\u02a8\u02a9\7V\2\2\u02a9\u02aa\7J\2\2\u02aa"+ + "\u02ab\7U\2\2\u02ab\u0080\3\2\2\2\u02ac\u02ad\7P\2\2\u02ad\u02ae\7C\2"+ + "\2\u02ae\u02af\7V\2\2\u02af\u02b0\7W\2\2\u02b0\u02b1\7T\2\2\u02b1\u02b2"+ + "\7C\2\2\u02b2\u02b3\7N\2\2\u02b3\u0082\3\2\2\2\u02b4\u02b5\7P\2\2\u02b5"+ + "\u02b6\7Q\2\2\u02b6\u02b7\7V\2\2\u02b7\u0084\3\2\2\2\u02b8\u02b9\7P\2"+ + "\2\u02b9\u02ba\7W\2\2\u02ba\u02bb\7N\2\2\u02bb\u02bc\7N\2\2\u02bc\u0086"+ + "\3\2\2\2\u02bd\u02be\7P\2\2\u02be\u02bf\7W\2\2\u02bf\u02c0\7N\2\2\u02c0"+ + "\u02c1\7N\2\2\u02c1\u02c2\7U\2\2\u02c2\u0088\3\2\2\2\u02c3\u02c4\7Q\2"+ + "\2\u02c4\u02c5\7P\2\2\u02c5\u008a\3\2\2\2\u02c6\u02c7\7Q\2\2\u02c7\u02c8"+ + "\7R\2\2\u02c8\u02c9\7V\2\2\u02c9\u02ca\7K\2\2\u02ca\u02cb\7O\2\2\u02cb"+ + "\u02cc\7K\2\2\u02cc\u02cd\7\\\2\2\u02cd\u02ce\7G\2\2\u02ce\u02cf\7F\2"+ + "\2\u02cf\u008c\3\2\2\2\u02d0\u02d1\7Q\2\2\u02d1\u02d2\7T\2\2\u02d2\u008e"+ + "\3\2\2\2\u02d3\u02d4\7Q\2\2\u02d4\u02d5\7T\2\2\u02d5\u02d6\7F\2\2\u02d6"+ + "\u02d7\7G\2\2\u02d7\u02d8\7T\2\2\u02d8\u0090\3\2\2\2\u02d9\u02da\7Q\2"+ + "\2\u02da\u02db\7W\2\2\u02db\u02dc\7V\2\2\u02dc\u02dd\7G\2\2\u02dd\u02de"+ + "\7T\2\2\u02de\u0092\3\2\2\2\u02df\u02e0\7R\2\2\u02e0\u02e1\7C\2\2\u02e1"+ + "\u02e2\7T\2\2\u02e2\u02e3\7U\2\2\u02e3\u02e4\7G\2\2\u02e4\u02e5\7F\2\2"+ + "\u02e5\u0094\3\2\2\2\u02e6\u02e7\7R\2\2\u02e7\u02e8\7J\2\2\u02e8\u02e9"+ + "\7[\2\2\u02e9\u02ea\7U\2\2\u02ea\u02eb\7K\2\2\u02eb\u02ec\7E\2\2\u02ec"+ + "\u02ed\7C\2\2\u02ed\u02ee\7N\2\2\u02ee\u0096\3\2\2\2\u02ef\u02f0\7R\2"+ + "\2\u02f0\u02f1\7N\2\2\u02f1\u02f2\7C\2\2\u02f2\u02f3\7P\2\2\u02f3\u0098"+ + "\3\2\2\2\u02f4\u02f5\7T\2\2\u02f5\u02f6\7K\2\2\u02f6\u02f7\7I\2\2\u02f7"+ + "\u02f8\7J\2\2\u02f8\u02f9\7V\2\2\u02f9\u009a\3\2\2\2\u02fa\u02fb\7T\2"+ + "\2\u02fb\u02fc\7N\2\2\u02fc\u02fd\7K\2\2\u02fd\u02fe\7M\2\2\u02fe\u02ff"+ + "\7G\2\2\u02ff\u009c\3\2\2\2\u0300\u0301\7S\2\2\u0301\u0302\7W\2\2\u0302"+ + "\u0303\7G\2\2\u0303\u0304\7T\2\2\u0304\u0305\7[\2\2\u0305\u009e\3\2\2"+ + "\2\u0306\u0307\7U\2\2\u0307\u0308\7E\2\2\u0308\u0309\7J\2\2\u0309\u030a"+ + "\7G\2\2\u030a\u030b\7O\2\2\u030b\u030c\7C\2\2\u030c\u030d\7U\2\2\u030d"+ + "\u00a0\3\2\2\2\u030e\u030f\7U\2\2\u030f\u0310\7G\2\2\u0310\u0311\7E\2"+ + "\2\u0311\u0312\7Q\2\2\u0312\u0313\7P\2\2\u0313\u0314\7F\2\2\u0314\u00a2"+ + "\3\2\2\2\u0315\u0316\7U\2\2\u0316\u0317\7G\2\2\u0317\u0318\7E\2\2\u0318"+ + 
"\u0319\7Q\2\2\u0319\u031a\7P\2\2\u031a\u031b\7F\2\2\u031b\u031c\7U\2\2"+ + "\u031c\u00a4\3\2\2\2\u031d\u031e\7U\2\2\u031e\u031f\7G\2\2\u031f\u0320"+ + "\7N\2\2\u0320\u0321\7G\2\2\u0321\u0322\7E\2\2\u0322\u0323\7V\2\2\u0323"+ + "\u00a6\3\2\2\2\u0324\u0325\7U\2\2\u0325\u0326\7J\2\2\u0326\u0327\7Q\2"+ + "\2\u0327\u0328\7Y\2\2\u0328\u00a8\3\2\2\2\u0329\u032a\7U\2\2\u032a\u032b"+ + "\7[\2\2\u032b\u032c\7U\2\2\u032c\u00aa\3\2\2\2\u032d\u032e\7V\2\2\u032e"+ + "\u032f\7C\2\2\u032f\u0330\7D\2\2\u0330\u0331\7N\2\2\u0331\u0332\7G\2\2"+ + "\u0332\u00ac\3\2\2\2\u0333\u0334\7V\2\2\u0334\u0335\7C\2\2\u0335\u0336"+ + "\7D\2\2\u0336\u0337\7N\2\2\u0337\u0338\7G\2\2\u0338\u0339\7U\2\2\u0339"+ + "\u00ae\3\2\2\2\u033a\u033b\7V\2\2\u033b\u033c\7G\2\2\u033c\u033d\7Z\2"+ + "\2\u033d\u033e\7V\2\2\u033e\u00b0\3\2\2\2\u033f\u0340\7V\2\2\u0340\u0341"+ + "\7J\2\2\u0341\u0342\7G\2\2\u0342\u0343\7P\2\2\u0343\u00b2\3\2\2\2\u0344"+ + "\u0345\7V\2\2\u0345\u0346\7T\2\2\u0346\u0347\7W\2\2\u0347\u0348\7G\2\2"+ + "\u0348\u00b4\3\2\2\2\u0349\u034a\7V\2\2\u034a\u034b\7Q\2\2\u034b\u00b6"+ + "\3\2\2\2\u034c\u034d\7V\2\2\u034d\u034e\7[\2\2\u034e\u034f\7R\2\2\u034f"+ + "\u0350\7G\2\2\u0350\u00b8\3\2\2\2\u0351\u0352\7V\2\2\u0352\u0353\7[\2"+ + "\2\u0353\u0354\7R\2\2\u0354\u0355\7G\2\2\u0355\u0356\7U\2\2\u0356\u00ba"+ + "\3\2\2\2\u0357\u0358\7W\2\2\u0358\u0359\7U\2\2\u0359\u035a\7K\2\2\u035a"+ + "\u035b\7P\2\2\u035b\u035c\7I\2\2\u035c\u00bc\3\2\2\2\u035d\u035e\7X\2"+ + "\2\u035e\u035f\7G\2\2\u035f\u0360\7T\2\2\u0360\u0361\7K\2\2\u0361\u0362"+ + "\7H\2\2\u0362\u0363\7[\2\2\u0363\u00be\3\2\2\2\u0364\u0365\7Y\2\2\u0365"+ + "\u0366\7J\2\2\u0366\u0367\7G\2\2\u0367\u0368\7P\2\2\u0368\u00c0\3\2\2"+ + "\2\u0369\u036a\7Y\2\2\u036a\u036b\7J\2\2\u036b\u036c\7G\2\2\u036c\u036d"+ + "\7T\2\2\u036d\u036e\7G\2\2\u036e\u00c2\3\2\2\2\u036f\u0370\7Y\2\2\u0370"+ + "\u0371\7K\2\2\u0371\u0372\7V\2\2\u0372\u0373\7J\2\2\u0373\u00c4\3\2\2"+ + "\2\u0374\u0375\7[\2\2\u0375\u0376\7G\2\2\u0376\u0377\7C\2\2\u0377\u0378"+ + "\7T\2\2\u0378\u00c6\3\2\2\2\u0379\u037a\7[\2\2\u037a\u037b\7G\2\2\u037b"+ + "\u037c\7C\2\2\u037c\u037d\7T\2\2\u037d\u037e\7U\2\2\u037e\u00c8\3\2\2"+ + "\2\u037f\u0380\7}\2\2\u0380\u0381\7G\2\2\u0381\u0382\7U\2\2\u0382\u0383"+ + "\7E\2\2\u0383\u0384\7C\2\2\u0384\u0385\7R\2\2\u0385\u0386\7G\2\2\u0386"+ + "\u00ca\3\2\2\2\u0387\u0388\7}\2\2\u0388\u0389\7H\2\2\u0389\u038a\7P\2"+ + "\2\u038a\u00cc\3\2\2\2\u038b\u038c\7}\2\2\u038c\u038d\7N\2\2\u038d\u038e"+ + "\7K\2\2\u038e\u038f\7O\2\2\u038f\u0390\7K\2\2\u0390\u0391\7V\2\2\u0391"+ + "\u00ce\3\2\2\2\u0392\u0393\7}\2\2\u0393\u0394\7F\2\2\u0394\u00d0\3\2\2"+ + "\2\u0395\u0396\7}\2\2\u0396\u0397\7V\2\2\u0397\u00d2\3\2\2\2\u0398\u0399"+ + "\7}\2\2\u0399\u039a\7V\2\2\u039a\u039b\7U\2\2\u039b\u00d4\3\2\2\2\u039c"+ + "\u039d\7}\2\2\u039d\u039e\7I\2\2\u039e\u039f\7W\2\2\u039f\u03a0\7K\2\2"+ + "\u03a0\u03a1\7F\2\2\u03a1\u00d6\3\2\2\2\u03a2\u03a3\7\177\2\2\u03a3\u00d8"+ + "\3\2\2\2\u03a4\u03a5\7?\2\2\u03a5\u00da\3\2\2\2\u03a6\u03a7\7>\2\2\u03a7"+ + "\u03a8\7?\2\2\u03a8\u03a9\7@\2\2\u03a9\u00dc\3\2\2\2\u03aa\u03ab\7>\2"+ + "\2\u03ab\u03af\7@\2\2\u03ac\u03ad\7#\2\2\u03ad\u03af\7?\2\2\u03ae\u03aa"+ + "\3\2\2\2\u03ae\u03ac\3\2\2\2\u03af\u00de\3\2\2\2\u03b0\u03b1\7>\2\2\u03b1"+ + "\u00e0\3\2\2\2\u03b2\u03b3\7>\2\2\u03b3\u03b4\7?\2\2\u03b4\u00e2\3\2\2"+ + "\2\u03b5\u03b6\7@\2\2\u03b6\u00e4\3\2\2\2\u03b7\u03b8\7@\2\2\u03b8\u03b9"+ + "\7?\2\2\u03b9\u00e6\3\2\2\2\u03ba\u03bb\7-\2\2\u03bb\u00e8\3\2\2\2\u03bc"+ + "\u03bd\7/\2\2\u03bd\u00ea\3\2\2\2\u03be\u03bf\7,\2\2\u03bf\u00ec\3\2\2"+ + 
"\2\u03c0\u03c1\7\61\2\2\u03c1\u00ee\3\2\2\2\u03c2\u03c3\7\'\2\2\u03c3"+ + "\u00f0\3\2\2\2\u03c4\u03c5\7<\2\2\u03c5\u03c6\7<\2\2\u03c6\u00f2\3\2\2"+ + "\2\u03c7\u03c8\7~\2\2\u03c8\u03c9\7~\2\2\u03c9\u00f4\3\2\2\2\u03ca\u03cb"+ + "\7\60\2\2\u03cb\u00f6\3\2\2\2\u03cc\u03cd\7A\2\2\u03cd\u00f8\3\2\2\2\u03ce"+ + "\u03d4\7)\2\2\u03cf\u03d3\n\2\2\2\u03d0\u03d1\7)\2\2\u03d1\u03d3\7)\2"+ + "\2\u03d2\u03cf\3\2\2\2\u03d2\u03d0\3\2\2\2\u03d3\u03d6\3\2\2\2\u03d4\u03d2"+ + "\3\2\2\2\u03d4\u03d5\3\2\2\2\u03d5\u03d7\3\2\2\2\u03d6\u03d4\3\2\2\2\u03d7"+ + "\u03d8\7)\2\2\u03d8\u00fa\3\2\2\2\u03d9\u03db\5\u010b\u0086\2\u03da\u03d9"+ + "\3\2\2\2\u03db\u03dc\3\2\2\2\u03dc\u03da\3\2\2\2\u03dc\u03dd\3\2\2\2\u03dd"+ + "\u00fc\3\2\2\2\u03de\u03e0\5\u010b\u0086\2\u03df\u03de\3\2\2\2\u03e0\u03e1"+ + "\3\2\2\2\u03e1\u03df\3\2\2\2\u03e1\u03e2\3\2\2\2\u03e2\u03e3\3\2\2\2\u03e3"+ + "\u03e7\5\u00f5{\2\u03e4\u03e6\5\u010b\u0086\2\u03e5\u03e4\3\2\2\2\u03e6"+ + "\u03e9\3\2\2\2\u03e7\u03e5\3\2\2\2\u03e7\u03e8\3\2\2\2\u03e8\u0409\3\2"+ + "\2\2\u03e9\u03e7\3\2\2\2\u03ea\u03ec\5\u00f5{\2\u03eb\u03ed\5\u010b\u0086"+ + "\2\u03ec\u03eb\3\2\2\2\u03ed\u03ee\3\2\2\2\u03ee\u03ec\3\2\2\2\u03ee\u03ef"+ + "\3\2\2\2\u03ef\u0409\3\2\2\2\u03f0\u03f2\5\u010b\u0086\2\u03f1\u03f0\3"+ + "\2\2\2\u03f2\u03f3\3\2\2\2\u03f3\u03f1\3\2\2\2\u03f3\u03f4\3\2\2\2\u03f4"+ + "\u03fc\3\2\2\2\u03f5\u03f9\5\u00f5{\2\u03f6\u03f8\5\u010b\u0086\2\u03f7"+ + "\u03f6\3\2\2\2\u03f8\u03fb\3\2\2\2\u03f9\u03f7\3\2\2\2\u03f9\u03fa\3\2"+ + "\2\2\u03fa\u03fd\3\2\2\2\u03fb\u03f9\3\2\2\2\u03fc\u03f5\3\2\2\2\u03fc"+ + "\u03fd\3\2\2\2\u03fd\u03fe\3\2\2\2\u03fe\u03ff\5\u0109\u0085\2\u03ff\u0409"+ + "\3\2\2\2\u0400\u0402\5\u00f5{\2\u0401\u0403\5\u010b\u0086\2\u0402\u0401"+ + "\3\2\2\2\u0403\u0404\3\2\2\2\u0404\u0402\3\2\2\2\u0404\u0405\3\2\2\2\u0405"+ + "\u0406\3\2\2\2\u0406\u0407\5\u0109\u0085\2\u0407\u0409\3\2\2\2\u0408\u03df"+ + "\3\2\2\2\u0408\u03ea\3\2\2\2\u0408\u03f1\3\2\2\2\u0408\u0400\3\2\2\2\u0409"+ + "\u00fe\3\2\2\2\u040a\u040d\5\u010d\u0087\2\u040b\u040d\7a\2\2\u040c\u040a"+ + "\3\2\2\2\u040c\u040b\3\2\2\2\u040d\u0413\3\2\2\2\u040e\u0412\5\u010d\u0087"+ + "\2\u040f\u0412\5\u010b\u0086\2\u0410\u0412\t\3\2\2\u0411\u040e\3\2\2\2"+ + "\u0411\u040f\3\2\2\2\u0411\u0410\3\2\2\2\u0412\u0415\3\2\2\2\u0413\u0411"+ + "\3\2\2\2\u0413\u0414\3\2\2\2\u0414\u0100\3\2\2\2\u0415\u0413\3\2\2\2\u0416"+ + "\u041a\5\u010b\u0086\2\u0417\u041b\5\u010d\u0087\2\u0418\u041b\5\u010b"+ + "\u0086\2\u0419\u041b\t\3\2\2\u041a\u0417\3\2\2\2\u041a\u0418\3\2\2\2\u041a"+ + "\u0419\3\2\2\2\u041b\u041c\3\2\2\2\u041c\u041a\3\2\2\2\u041c\u041d\3\2"+ + "\2\2\u041d\u0102\3\2\2\2\u041e\u0422\5\u010d\u0087\2\u041f\u0422\5\u010b"+ + "\u0086\2\u0420\u0422\7a\2\2\u0421\u041e\3\2\2\2\u0421\u041f\3\2\2\2\u0421"+ + "\u0420\3\2\2\2\u0422\u0423\3\2\2\2\u0423\u0421\3\2\2\2\u0423\u0424\3\2"+ + "\2\2\u0424\u0104\3\2\2\2\u0425\u042b\7$\2\2\u0426\u042a\n\4\2\2\u0427"+ + "\u0428\7$\2\2\u0428\u042a\7$\2\2\u0429\u0426\3\2\2\2\u0429\u0427\3\2\2"+ + "\2\u042a\u042d\3\2\2\2\u042b\u0429\3\2\2\2\u042b\u042c\3\2\2\2\u042c\u042e"+ + "\3\2\2\2\u042d\u042b\3\2\2\2\u042e\u042f\7$\2\2\u042f\u0106\3\2\2\2\u0430"+ + "\u0436\7b\2\2\u0431\u0435\n\5\2\2\u0432\u0433\7b\2\2\u0433\u0435\7b\2"+ + "\2\u0434\u0431\3\2\2\2\u0434\u0432\3\2\2\2\u0435\u0438\3\2\2\2\u0436\u0434"+ + "\3\2\2\2\u0436\u0437\3\2\2\2\u0437\u0439\3\2\2\2\u0438\u0436\3\2\2\2\u0439"+ + "\u043a\7b\2\2\u043a\u0108\3\2\2\2\u043b\u043d\7G\2\2\u043c\u043e\t\6\2"+ + "\2\u043d\u043c\3\2\2\2\u043d\u043e\3\2\2\2\u043e\u0440\3\2\2\2\u043f\u0441"+ + 
"\5\u010b\u0086\2\u0440\u043f\3\2\2\2\u0441\u0442\3\2\2\2\u0442\u0440\3"+ + "\2\2\2\u0442\u0443\3\2\2\2\u0443\u010a\3\2\2\2\u0444\u0445\t\7\2\2\u0445"+ + "\u010c\3\2\2\2\u0446\u0447\t\b\2\2\u0447\u010e\3\2\2\2\u0448\u0449\7/"+ + "\2\2\u0449\u044a\7/\2\2\u044a\u044e\3\2\2\2\u044b\u044d\n\t\2\2\u044c"+ + "\u044b\3\2\2\2\u044d\u0450\3\2\2\2\u044e\u044c\3\2\2\2\u044e\u044f\3\2"+ + "\2\2\u044f\u0452\3\2\2\2\u0450\u044e\3\2\2\2\u0451\u0453\7\17\2\2\u0452"+ + "\u0451\3\2\2\2\u0452\u0453\3\2\2\2\u0453\u0455\3\2\2\2\u0454\u0456\7\f"+ + "\2\2\u0455\u0454\3\2\2\2\u0455\u0456\3\2\2\2\u0456\u0457\3\2\2\2\u0457"+ + "\u0458\b\u0088\2\2\u0458\u0110\3\2\2\2\u0459\u045a\7\61\2\2\u045a\u045b"+ + "\7,\2\2\u045b\u0460\3\2\2\2\u045c\u045f\5\u0111\u0089\2\u045d\u045f\13"+ + "\2\2\2\u045e\u045c\3\2\2\2\u045e\u045d\3\2\2\2\u045f\u0462\3\2\2\2\u0460"+ + "\u0461\3\2\2\2\u0460\u045e\3\2\2\2\u0461\u0463\3\2\2\2\u0462\u0460\3\2"+ + "\2\2\u0463\u0464\7,\2\2\u0464\u0465\7\61\2\2\u0465\u0466\3\2\2\2\u0466"+ + "\u0467\b\u0089\2\2\u0467\u0112\3\2\2\2\u0468\u046a\t\n\2\2\u0469\u0468"+ + "\3\2\2\2\u046a\u046b\3\2\2\2\u046b\u0469\3\2\2\2\u046b\u046c\3\2\2\2\u046c"+ + "\u046d\3\2\2\2\u046d\u046e\b\u008a\2\2\u046e\u0114\3\2\2\2\u046f\u0470"+ + "\13\2\2\2\u0470\u0116\3\2\2\2\"\2\u03ae\u03d2\u03d4\u03dc\u03e1\u03e7"+ + "\u03ee\u03f3\u03f9\u03fc\u0404\u0408\u040c\u0411\u0413\u041a\u041c\u0421"+ + "\u0423\u0429\u042b\u0434\u0436\u043d\u0442\u044e\u0452\u0455\u045e\u0460"+ + "\u046b\3\2\3\2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java index 1ae317552268e..76e0f4654df6d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -1,27 +1,13 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.xpack.sql.parser; - -import org.antlr.v4.runtime.FailedPredicateException; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.Parser; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.RecognitionException; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.RuntimeMetaData; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.ParserATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.tree.ParseTreeListener; -import org.antlr.v4.runtime.tree.ParseTreeVisitor; -import org.antlr.v4.runtime.tree.TerminalNode; - +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.*; +import org.antlr.v4.runtime.tree.*; import java.util.List; +import java.util.Iterator; +import java.util.ArrayList; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) class SqlBaseParser extends Parser { @@ -36,21 +22,21 @@ class SqlBaseParser extends Parser { COLUMNS=18, CONVERT=19, CURRENT_DATE=20, CURRENT_TIME=21, CURRENT_TIMESTAMP=22, DAY=23, DAYS=24, DEBUG=25, DESC=26, DESCRIBE=27, DISTINCT=28, ELSE=29, END=30, ESCAPE=31, EXECUTABLE=32, 
 		EXISTS=33, EXPLAIN=34, EXTRACT=35, FALSE=36,
-		FIRST=37, FORMAT=38, FROM=39, FULL=40, FUNCTIONS=41, GRAPHVIZ=42, GROUP=43,
-		HAVING=44, HOUR=45, HOURS=46, IN=47, INNER=48, INTERVAL=49, IS=50, JOIN=51,
-		LAST=52, LEFT=53, LIKE=54, LIMIT=55, MAPPED=56, MATCH=57, MINUTE=58, MINUTES=59,
-		MONTH=60, MONTHS=61, NATURAL=62, NOT=63, NULL=64, NULLS=65, ON=66, OPTIMIZED=67,
-		OR=68, ORDER=69, OUTER=70, PARSED=71, PHYSICAL=72, PLAN=73, RIGHT=74,
-		RLIKE=75, QUERY=76, SCHEMAS=77, SECOND=78, SECONDS=79, SELECT=80, SHOW=81,
-		SYS=82, TABLE=83, TABLES=84, TEXT=85, THEN=86, TRUE=87, TO=88, TYPE=89,
-		TYPES=90, USING=91, VERIFY=92, WHEN=93, WHERE=94, WITH=95, YEAR=96, YEARS=97,
-		ESCAPE_ESC=98, FUNCTION_ESC=99, LIMIT_ESC=100, DATE_ESC=101, TIME_ESC=102,
-		TIMESTAMP_ESC=103, GUID_ESC=104, ESC_END=105, EQ=106, NULLEQ=107, NEQ=108,
-		LT=109, LTE=110, GT=111, GTE=112, PLUS=113, MINUS=114, ASTERISK=115, SLASH=116,
-		PERCENT=117, CAST_OP=118, CONCAT=119, DOT=120, PARAM=121, STRING=122,
-		INTEGER_VALUE=123, DECIMAL_VALUE=124, IDENTIFIER=125, DIGIT_IDENTIFIER=126,
-		TABLE_IDENTIFIER=127, QUOTED_IDENTIFIER=128, BACKQUOTED_IDENTIFIER=129,
-		SIMPLE_COMMENT=130, BRACKETED_COMMENT=131, WS=132, UNRECOGNIZED=133, DELIMITER=134;
+		FIRST=37, FORMAT=38, FROM=39, FROZEN=40, FULL=41, FUNCTIONS=42, GRAPHVIZ=43,
+		GROUP=44, HAVING=45, HOUR=46, HOURS=47, IN=48, INCLUDE=49, INNER=50, INTERVAL=51,
+		IS=52, JOIN=53, LAST=54, LEFT=55, LIKE=56, LIMIT=57, MAPPED=58, MATCH=59,
+		MINUTE=60, MINUTES=61, MONTH=62, MONTHS=63, NATURAL=64, NOT=65, NULL=66,
+		NULLS=67, ON=68, OPTIMIZED=69, OR=70, ORDER=71, OUTER=72, PARSED=73, PHYSICAL=74,
+		PLAN=75, RIGHT=76, RLIKE=77, QUERY=78, SCHEMAS=79, SECOND=80, SECONDS=81,
+		SELECT=82, SHOW=83, SYS=84, TABLE=85, TABLES=86, TEXT=87, THEN=88, TRUE=89,
+		TO=90, TYPE=91, TYPES=92, USING=93, VERIFY=94, WHEN=95, WHERE=96, WITH=97,
+		YEAR=98, YEARS=99, ESCAPE_ESC=100, FUNCTION_ESC=101, LIMIT_ESC=102, DATE_ESC=103,
+		TIME_ESC=104, TIMESTAMP_ESC=105, GUID_ESC=106, ESC_END=107, EQ=108, NULLEQ=109,
+		NEQ=110, LT=111, LTE=112, GT=113, GTE=114, PLUS=115, MINUS=116, ASTERISK=117,
+		SLASH=118, PERCENT=119, CAST_OP=120, CONCAT=121, DOT=122, PARAM=123, STRING=124,
+		INTEGER_VALUE=125, DECIMAL_VALUE=126, IDENTIFIER=127, DIGIT_IDENTIFIER=128,
+		TABLE_IDENTIFIER=129, QUOTED_IDENTIFIER=130, BACKQUOTED_IDENTIFIER=131,
+		SIMPLE_COMMENT=132, BRACKETED_COMMENT=133, WS=134, UNRECOGNIZED=135, DELIMITER=136;
 	public static final int
 		RULE_singleStatement = 0, RULE_singleExpression = 1, RULE_statement = 2,
 		RULE_query = 3, RULE_queryNoWith = 4, RULE_limitClause = 5, RULE_queryTerm = 6,
@@ -91,18 +77,18 @@ class SqlBaseParser extends Parser {
 		"'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DAY'", "'DAYS'", "'DEBUG'",
 		"'DESC'", "'DESCRIBE'", "'DISTINCT'", "'ELSE'", "'END'", "'ESCAPE'", "'EXECUTABLE'",
 		"'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", "'FIRST'", "'FORMAT'",
-		"'FROM'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", "'HAVING'",
-		"'HOUR'", "'HOURS'", "'IN'", "'INNER'", "'INTERVAL'", "'IS'", "'JOIN'",
-		"'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MAPPED'", "'MATCH'", "'MINUTE'",
-		"'MINUTES'", "'MONTH'", "'MONTHS'", "'NATURAL'", "'NOT'", "'NULL'", "'NULLS'",
-		"'ON'", "'OPTIMIZED'", "'OR'", "'ORDER'", "'OUTER'", "'PARSED'", "'PHYSICAL'",
-		"'PLAN'", "'RIGHT'", "'RLIKE'", "'QUERY'", "'SCHEMAS'", "'SECOND'", "'SECONDS'",
-		"'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'",
-		"'TRUE'", "'TO'", "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", "'WHEN'",
-		"'WHERE'", "'WITH'", "'YEAR'", "'YEARS'", "'{ESCAPE'", "'{FN'", "'{LIMIT'",
-		"'{D'", "'{T'", "'{TS'", "'{GUID'", "'}'", "'='", "'<=>'", null, "'<'",
-		"'<='", "'>'", "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", "'::'", "'||'",
-		"'.'", "'?'"
+		"'FROM'", "'FROZEN'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'",
+		"'HAVING'", "'HOUR'", "'HOURS'", "'IN'", "'INCLUDE'", "'INNER'", "'INTERVAL'",
+		"'IS'", "'JOIN'", "'LAST'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MAPPED'",
+		"'MATCH'", "'MINUTE'", "'MINUTES'", "'MONTH'", "'MONTHS'", "'NATURAL'",
+		"'NOT'", "'NULL'", "'NULLS'", "'ON'", "'OPTIMIZED'", "'OR'", "'ORDER'",
+		"'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", "'RIGHT'", "'RLIKE'", "'QUERY'",
+		"'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'",
+		"'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'", "'TYPE'", "'TYPES'",
+		"'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'", "'YEAR'", "'YEARS'",
+		"'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'", "'{TS'", "'{GUID'",
+		"'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'", "'>='", "'+'", "'-'",
+		"'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'"
 	};
 	private static final String[] _SYMBOLIC_NAMES = {
 		null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY",
@@ -110,19 +96,20 @@ class SqlBaseParser extends Parser {
 		"CONVERT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DAY",
 		"DAYS", "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ELSE", "END", "ESCAPE",
 		"EXECUTABLE", "EXISTS", "EXPLAIN", "EXTRACT", "FALSE", "FIRST", "FORMAT",
-		"FROM", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING", "HOUR", "HOURS",
-		"IN", "INNER", "INTERVAL", "IS", "JOIN", "LAST", "LEFT", "LIKE", "LIMIT",
-		"MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH", "MONTHS", "NATURAL",
-		"NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR", "ORDER", "OUTER", "PARSED",
-		"PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY", "SCHEMAS", "SECOND", "SECONDS",
-		"SELECT", "SHOW", "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO",
-		"TYPE", "TYPES", "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS",
-		"ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC",
-		"GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE",
-		"PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT",
-		"DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER",
-		"DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER",
-		"SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED", "DELIMITER"
+		"FROM", "FROZEN", "FULL", "FUNCTIONS", "GRAPHVIZ", "GROUP", "HAVING",
+		"HOUR", "HOURS", "IN", "INCLUDE", "INNER", "INTERVAL", "IS", "JOIN", "LAST",
+		"LEFT", "LIKE", "LIMIT", "MAPPED", "MATCH", "MINUTE", "MINUTES", "MONTH",
+		"MONTHS", "NATURAL", "NOT", "NULL", "NULLS", "ON", "OPTIMIZED", "OR",
+		"ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", "QUERY",
+		"SCHEMAS", "SECOND", "SECONDS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES",
+		"TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN",
+		"WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC",
+		"DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", "GUID_ESC", "ESC_END", "EQ",
+		"NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK",
+		"SLASH", "PERCENT", "CAST_OP", "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE",
+		"DECIMAL_VALUE", "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER",
+		"QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT",
+		"WS", "UNRECOGNIZED", "DELIMITER"
 	};
 	public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);

@@ -540,6 +527,8 @@ public static class ShowTablesContext extends StatementContext {
 		public TableIdentifierContext tableIdent;
 		public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); }
 		public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); }
+		public TerminalNode INCLUDE() { return getToken(SqlBaseParser.INCLUDE, 0); }
+		public TerminalNode FROZEN() { return getToken(SqlBaseParser.FROZEN, 0); }
 		public LikePatternContext likePattern() {
 			return getRuleContext(LikePatternContext.class,0);
 		}
@@ -586,6 +575,8 @@ public static class ShowColumnsContext extends StatementContext {
 		public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); }
 		public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); }
 		public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); }
+		public TerminalNode INCLUDE() { return getToken(SqlBaseParser.INCLUDE, 0); }
+		public TerminalNode FROZEN() { return getToken(SqlBaseParser.FROZEN, 0); }
 		public LikePatternContext likePattern() {
 			return getRuleContext(LikePatternContext.class,0);
 		}
@@ -615,9 +606,9 @@ public final StatementContext statement() throws RecognitionException {
 		enterRule(_localctx, 4, RULE_statement);
 		int _la;
 		try {
-			setState(217);
+			setState(229);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) {
 			case 1:
 				_localctx = new StatementDefaultContext(_localctx);
 				enterOuterAlt(_localctx, 1);
 				{
@@ -773,10 +764,21 @@ public final StatementContext statement() throws RecognitionException {
 				setState(150);
 				match(TABLES);
 				setState(153);
+				_la = _input.LA(1);
+				if (_la==INCLUDE) {
+					{
+					setState(151);
+					match(INCLUDE);
+					setState(152);
+					match(FROZEN);
+					}
+				}
+
+				setState(157);
 				switch (_input.LA(1)) {
 				case LIKE:
 					{
-					setState(151);
+					setState(155);
 					((ShowTablesContext)_localctx).tableLike = likePattern();
 					}
 					break;
@@ -825,7 +827,7 @@ public final StatementContext statement() throws RecognitionException {
 				case QUOTED_IDENTIFIER:
 				case BACKQUOTED_IDENTIFIER:
 					{
-					setState(152);
+					setState(156);
 					((ShowTablesContext)_localctx).tableIdent = tableIdentifier();
 					}
 					break;
@@ -840,22 +842,33 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new ShowColumnsContext(_localctx);
 				enterOuterAlt(_localctx, 5);
 				{
-				setState(155);
+				setState(159);
 				match(SHOW);
-				setState(156);
+				setState(160);
 				match(COLUMNS);
-				setState(157);
+				setState(163);
+				_la = _input.LA(1);
+				if (_la==INCLUDE) {
+					{
+					setState(161);
+					match(INCLUDE);
+					setState(162);
+					match(FROZEN);
+					}
+				}
+
+				setState(165);
 				_la = _input.LA(1);
 				if ( !(_la==FROM || _la==IN) ) {
 				_errHandler.recoverInline(this);
 				} else {
 					consume();
 				}
-				setState(160);
+				setState(168);
 				switch (_input.LA(1)) {
 				case LIKE:
 					{
-					setState(158);
+					setState(166);
 					((ShowColumnsContext)_localctx).tableLike = likePattern();
 					}
 					break;
@@ -904,7 +917,7 @@ public final StatementContext statement() throws RecognitionException {
 				case QUOTED_IDENTIFIER:
 				case BACKQUOTED_IDENTIFIER:
 					{
-					setState(159);
+					setState(167);
 					((ShowColumnsContext)_localctx).tableIdent = tableIdentifier();
 					}
 					break;
@@ -917,18 +930,29 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new ShowColumnsContext(_localctx);
 				enterOuterAlt(_localctx, 6);
 				{
-				setState(162);
+				setState(170);
 				_la = _input.LA(1);
 				if ( !(_la==DESC || _la==DESCRIBE) ) {
 				_errHandler.recoverInline(this);
 				} else {
 					consume();
 				}
-				setState(165);
+				setState(173);
+				_la = _input.LA(1);
+				if (_la==INCLUDE) {
+					{
+					setState(171);
+					match(INCLUDE);
+					setState(172);
+					match(FROZEN);
+					}
+				}
+
+				setState(177);
 				switch (_input.LA(1)) {
 				case LIKE:
 					{
-					setState(163);
+					setState(175);
 					((ShowColumnsContext)_localctx).tableLike = likePattern();
 					}
 					break;
@@ -977,7 +1001,7 @@ public final StatementContext statement() throws RecognitionException {
 				case QUOTED_IDENTIFIER:
 				case BACKQUOTED_IDENTIFIER:
 					{
-					setState(164);
+					setState(176);
 					((ShowColumnsContext)_localctx).tableIdent = tableIdentifier();
 					}
 					break;
@@ -990,15 +1014,15 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new ShowFunctionsContext(_localctx);
 				enterOuterAlt(_localctx, 7);
 				{
-				setState(167);
+				setState(179);
 				match(SHOW);
-				setState(168);
+				setState(180);
 				match(FUNCTIONS);
-				setState(170);
+				setState(182);
 				_la = _input.LA(1);
 				if (_la==LIKE) {
 					{
-					setState(169);
+					setState(181);
 					likePattern();
 					}
 				}
@@ -1009,9 +1033,9 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new ShowSchemasContext(_localctx);
 				enterOuterAlt(_localctx, 8);
 				{
-				setState(172);
+				setState(184);
 				match(SHOW);
-				setState(173);
+				setState(185);
 				match(SCHEMAS);
 				}
 				break;
@@ -1019,58 +1043,58 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new SysTablesContext(_localctx);
 				enterOuterAlt(_localctx, 9);
 				{
-				setState(174);
+				setState(186);
 				match(SYS);
-				setState(175);
+				setState(187);
 				match(TABLES);
-				setState(178);
+				setState(190);
 				_la = _input.LA(1);
 				if (_la==CATALOG) {
					{
-					setState(176);
+					setState(188);
 					match(CATALOG);
-					setState(177);
+					setState(189);
 					((SysTablesContext)_localctx).clusterLike = likePattern();
 					}
 				}

-				setState(182);
+				setState(194);
 				_errHandler.sync(this);
-				switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) {
+				switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) {
 				case 1:
 					{
-					setState(180);
+					setState(192);
 					((SysTablesContext)_localctx).tableLike = likePattern();
 					}
 					break;
 				case 2:
 					{
-					setState(181);
+					setState(193);
 					((SysTablesContext)_localctx).tableIdent = tableIdentifier();
 					}
 					break;
 				}
-				setState(193);
+				setState(205);
 				_la = _input.LA(1);
 				if (_la==TYPE) {
 					{
-					setState(184);
+					setState(196);
 					match(TYPE);
-					setState(185);
+					setState(197);
 					string();
-					setState(190);
+					setState(202);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 					while (_la==T__2) {
 						{
						{
-						setState(186);
+						setState(198);
 						match(T__2);
-						setState(187);
+						setState(199);
 						string();
 						}
 						}
-						setState(192);
+						setState(204);
 						_errHandler.sync(this);
 						_la = _input.LA(1);
 					}
@@ -1083,28 +1107,28 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new SysColumnsContext(_localctx);
 				enterOuterAlt(_localctx, 10);
 				{
-				setState(195);
+				setState(207);
 				match(SYS);
-				setState(196);
+				setState(208);
 				match(COLUMNS);
-				setState(199);
+				setState(211);
 				_la = _input.LA(1);
 				if (_la==CATALOG) {
 					{
-					setState(197);
+					setState(209);
 					match(CATALOG);
-					setState(198);
+					setState(210);
 					((SysColumnsContext)_localctx).cluster = string();
 					}
 				}

-				setState(204);
+				setState(216);
 				switch (_input.LA(1)) {
 				case TABLE:
 					{
-					setState(201);
+					setState(213);
 					match(TABLE);
-					setState(202);
+					setState(214);
 					((SysColumnsContext)_localctx).tableLike = likePattern();
 					}
 					break;
@@ -1153,7 +1177,7 @@ public final StatementContext statement() throws RecognitionException {
 				case QUOTED_IDENTIFIER:
 				case BACKQUOTED_IDENTIFIER:
 					{
-					setState(203);
+					setState(215);
 					((SysColumnsContext)_localctx).tableIdent = tableIdentifier();
 					}
 					break;
@@ -1163,11 +1187,11 @@ public final StatementContext statement() throws RecognitionException {
 				default:
 					throw new NoViableAltException(this);
 				}
-				setState(207);
+				setState(219);
 				_la = _input.LA(1);
 				if (_la==LIKE) {
 					{
-					setState(206);
+					setState(218);
 					((SysColumnsContext)_localctx).columnPattern = likePattern();
 					}
 				}
@@ -1178,19 +1202,19 @@ public final StatementContext statement() throws RecognitionException {
 				_localctx = new SysTypesContext(_localctx);
 				enterOuterAlt(_localctx, 11);
 				{
-				setState(209);
+				setState(221);
 				match(SYS);
-				setState(210);
+				setState(222);
 				match(TYPES);
-				setState(215);
+				setState(227);
 				_la = _input.LA(1);
-				if (((((_la - 113)) & ~0x3f) == 0 && ((1L << (_la - 113)) & ((1L << (PLUS - 113)) | (1L << (MINUS - 113)) | (1L << (INTEGER_VALUE - 113)) | (1L << (DECIMAL_VALUE - 113)))) != 0)) {
+				if (((((_la - 115)) & ~0x3f) == 0 && ((1L << (_la - 115)) & ((1L << (PLUS - 115)) | (1L << (MINUS - 115)) | (1L << (INTEGER_VALUE - 115)) | (1L << (DECIMAL_VALUE - 115)))) != 0)) {
 					{
-					setState(212);
+					setState(224);
 					_la = _input.LA(1);
 					if (_la==PLUS || _la==MINUS) {
 						{
-						setState(211);
+						setState(223);
 						_la = _input.LA(1);
 						if ( !(_la==PLUS || _la==MINUS) ) {
 						_errHandler.recoverInline(this);
@@ -1200,7 +1224,7 @@ public final StatementContext statement() throws RecognitionException {
 						}
 					}

-					setState(214);
+					setState(226);
 					((SysTypesContext)_localctx).type = number();
 					}
 				}
@@ -1257,34 +1281,34 @@ public final QueryContext query() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(228);
+			setState(240);
 			_la = _input.LA(1);
 			if (_la==WITH) {
 				{
-				setState(219);
+				setState(231);
 				match(WITH);
-				setState(220);
+				setState(232);
 				namedQuery();
-				setState(225);
+				setState(237);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 				while (_la==T__2) {
 					{
					{
-					setState(221);
+					setState(233);
 					match(T__2);
-					setState(222);
+					setState(234);
 					namedQuery();
 					}
 					}
-					setState(227);
+					setState(239);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 				}
 				}
 			}

-			setState(230);
+			setState(242);
 			queryNoWith();
 			}
 		}
@@ -1340,42 +1364,42 @@ public final QueryNoWithContext queryNoWith() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(232);
+			setState(244);
 			queryTerm();
-			setState(243);
+			setState(255);
 			_la = _input.LA(1);
 			if (_la==ORDER) {
 				{
-				setState(233);
+				setState(245);
 				match(ORDER);
-				setState(234);
+				setState(246);
 				match(BY);
-				setState(235);
+				setState(247);
 				orderBy();
-				setState(240);
+				setState(252);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 				while (_la==T__2) {
 					{
					{
-					setState(236);
+					setState(248);
 					match(T__2);
-					setState(237);
+					setState(249);
 					orderBy();
 					}
 					}
-					setState(242);
+					setState(254);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 				}
 				}
 			}

-			setState(246);
+			setState(258);
 			_la = _input.LA(1);
 			if (_la==LIMIT || _la==LIMIT_ESC) {
 				{
-				setState(245);
+				setState(257);
 				limitClause();
 				}
 			}
@@ -1424,14 +1448,14 @@ public final LimitClauseContext limitClause() throws RecognitionException {
 		enterRule(_localctx, 10, RULE_limitClause);
 		int _la;
 		try {
-			setState(253);
+			setState(265);
 			switch (_input.LA(1)) {
 			case LIMIT:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(248);
+				setState(260);
 				match(LIMIT);
-				setState(249);
+				setState(261);
 				((LimitClauseContext)_localctx).limit = _input.LT(1);
 				_la = _input.LA(1);
 				if ( !(_la==ALL || _la==INTEGER_VALUE) ) {
@@ -1444,9 +1468,9 @@ public final LimitClauseContext limitClause() throws RecognitionException {
 			case LIMIT_ESC:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(250);
+				setState(262);
 				match(LIMIT_ESC);
-				setState(251);
+				setState(263);
 				((LimitClauseContext)_localctx).limit = _input.LT(1);
 				_la = _input.LA(1);
 				if ( !(_la==ALL || _la==INTEGER_VALUE) ) {
@@ -1454,7 +1478,7 @@ public final LimitClauseContext limitClause() throws RecognitionException {
 				} else {
 					consume();
 				}
-				setState(252);
+				setState(264);
 				match(ESC_END);
 				}
 				break;
@@ -1527,13 +1551,13 @@ public final QueryTermContext queryTerm() throws RecognitionException {
 		QueryTermContext _localctx = new QueryTermContext(_ctx, getState());
 		enterRule(_localctx, 12, RULE_queryTerm);
 		try {
-			setState(260);
+			setState(272);
 			switch (_input.LA(1)) {
 			case SELECT:
 				_localctx = new QueryPrimaryDefaultContext(_localctx);
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(255);
+				setState(267);
 				querySpecification();
 				}
 				break;
@@ -1541,11 +1565,11 @@ public final QueryTermContext queryTerm() throws RecognitionException {
 				_localctx = new SubqueryContext(_localctx);
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(256);
+				setState(268);
 				match(T__0);
-				setState(257);
+				setState(269);
 				queryNoWith();
-				setState(258);
+				setState(270);
 				match(T__1);
 				}
 				break;
@@ -1601,13 +1625,13 @@ public final OrderByContext orderBy() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(262);
+			setState(274);
 			expression();
-			setState(264);
+			setState(276);
 			_la = _input.LA(1);
 			if (_la==ASC || _la==DESC) {
 				{
-				setState(263);
+				setState(275);
 				((OrderByContext)_localctx).ordering = _input.LT(1);
 				_la = _input.LA(1);
 				if ( !(_la==ASC || _la==DESC) ) {
@@ -1618,13 +1642,13 @@ public final OrderByContext orderBy() throws RecognitionException {
 				}
 			}

-			setState(268);
+			setState(280);
 			_la = _input.LA(1);
 			if (_la==NULLS) {
 				{
-				setState(266);
+				setState(278);
 				match(NULLS);
-				setState(267);
+				setState(279);
 				((OrderByContext)_localctx).nullOrdering = _input.LT(1);
 				_la = _input.LA(1);
 				if ( !(_la==FIRST || _la==LAST) ) {
@@ -1703,75 +1727,75 @@ public final QuerySpecificationContext querySpecification() throws RecognitionEx
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(270);
+			setState(282);
 			match(SELECT);
-			setState(272);
+			setState(284);
 			_la = _input.LA(1);
 			if (_la==ALL || _la==DISTINCT) {
 				{
-				setState(271);
+				setState(283);
 				setQuantifier();
 				}
 			}

-			setState(274);
+			setState(286);
 			selectItem();
-			setState(279);
+			setState(291);
 			_errHandler.sync(this);
 			_la = _input.LA(1);
 			while (_la==T__2) {
 				{
				{
-				setState(275);
+				setState(287);
 				match(T__2);
-				setState(276);
+				setState(288);
 				selectItem();
 				}
 				}
-				setState(281);
+				setState(293);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 			}
-			setState(283);
+			setState(295);
 			_la = _input.LA(1);
 			if (_la==FROM) {
 				{
-				setState(282);
+				setState(294);
 				fromClause();
 				}
 			}

-			setState(287);
+			setState(299);
 			_la = _input.LA(1);
 			if (_la==WHERE) {
 				{
-				setState(285);
+				setState(297);
 				match(WHERE);
-				setState(286);
+				setState(298);
 				((QuerySpecificationContext)_localctx).where = booleanExpression(0);
 				}
 			}

-			setState(292);
+			setState(304);
 			_la = _input.LA(1);
 			if (_la==GROUP) {
 				{
-				setState(289);
+				setState(301);
 				match(GROUP);
-				setState(290);
+				setState(302);
 				match(BY);
-				setState(291);
+				setState(303);
 				groupBy();
 				}
 			}

-			setState(296);
+			setState(308);
 			_la = _input.LA(1);
 			if (_la==HAVING) {
 				{
-				setState(294);
+				setState(306);
 				match(HAVING);
-				setState(295);
+				setState(307);
 				((QuerySpecificationContext)_localctx).having = booleanExpression(0);
 				}
 			}
@@ -1823,23 +1847,23 @@ public final FromClauseContext fromClause() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(298);
+			setState(310);
 			match(FROM);
-			setState(299);
+			setState(311);
 			relation();
-			setState(304);
+			setState(316);
 			_errHandler.sync(this);
 			_la = _input.LA(1);
 			while (_la==T__2) {
 				{
				{
-				setState(300);
+				setState(312);
 				match(T__2);
-				setState(301);
+				setState(313);
 				relation();
 				}
 				}
-				setState(306);
+				setState(318);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 			}
@@ -1892,30 +1916,30 @@ public final GroupByContext groupBy() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(308);
+			setState(320);
 			_la = _input.LA(1);
 			if (_la==ALL || _la==DISTINCT) {
 				{
-				setState(307);
+				setState(319);
 				setQuantifier();
 				}
 			}

-			setState(310);
+			setState(322);
 			groupingElement();
-			setState(315);
+			setState(327);
 			_errHandler.sync(this);
 			_la = _input.LA(1);
 			while (_la==T__2) {
 				{
				{
-				setState(311);
+				setState(323);
 				match(T__2);
-				setState(312);
+				setState(324);
 				groupingElement();
 				}
 				}
-				setState(317);
+				setState(329);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 			}
@@ -1970,7 +1994,7 @@ public final GroupingElementContext groupingElement() throws RecognitionExceptio
 			_localctx = new SingleGroupingSetContext(_localctx);
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(318);
+			setState(330);
 			groupingExpressions();
 			}
 		}
@@ -2016,47 +2040,47 @@ public final GroupingExpressionsContext groupingExpressions() throws Recognition
 		enterRule(_localctx, 24, RULE_groupingExpressions);
 		int _la;
 		try {
-			setState(333);
+			setState(345);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) {
 			case 1:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(320);
+				setState(332);
 				match(T__0);
-				setState(329);
+				setState(341);
 				_la = _input.LA(1);
-				if (((((_la - 1)) & ~0x3f) == 0 && ((1L << (_la - 1)) & ((1L << (T__0 - 1)) | (1L << (ANALYZE - 1)) | (1L << (ANALYZED - 1)) | (1L << (CASE - 1)) | (1L << (CAST - 1)) | (1L << (CATALOGS - 1)) | (1L << (COLUMNS - 1)) | (1L << (CONVERT - 1)) | (1L << (CURRENT_DATE - 1)) | (1L << (CURRENT_TIME - 1)) | (1L << (CURRENT_TIMESTAMP - 1)) | (1L << (DAY - 1)) | (1L << (DEBUG - 1)) | (1L << (EXECUTABLE - 1)) | (1L << (EXISTS - 1)) | (1L << (EXPLAIN - 1)) | (1L << (EXTRACT - 1)) | (1L << (FALSE - 1)) | (1L << (FIRST - 1)) | (1L << (FORMAT - 1)) | (1L << (FULL - 1)) | (1L << (FUNCTIONS - 1)) | (1L << (GRAPHVIZ - 1)) | (1L << (HOUR - 1)) | (1L << (INTERVAL - 1)) | (1L << (LAST - 1)) | (1L << (LEFT - 1)) | (1L << (LIMIT - 1)) | (1L << (MAPPED - 1)) | (1L << (MATCH - 1)) | (1L << (MINUTE - 1)) | (1L << (MONTH - 1)) | (1L << (NOT - 1)) | (1L << (NULL - 1)))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (OPTIMIZED - 67)) | (1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RIGHT - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TRUE - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (FUNCTION_ESC - 67)) | (1L << (DATE_ESC - 67)) | (1L << (TIME_ESC - 67)) | (1L << (TIMESTAMP_ESC - 67)) | (1L << (GUID_ESC - 67)) | (1L << (PLUS - 67)) | (1L << (MINUS - 67)) | (1L << (ASTERISK - 67)) | (1L << (PARAM - 67)) | (1L << (STRING - 67)) | (1L << (INTEGER_VALUE - 67)) | (1L << (DECIMAL_VALUE - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) {
+				if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (NOT - 65)) | (1L << (NULL - 65)) | (1L << (OPTIMIZED - 65)) | (1L << (PARSED - 65)) | (1L << (PHYSICAL - 65)) | (1L << (PLAN - 65)) | (1L << (RIGHT - 65)) | (1L << (RLIKE - 65)) | (1L << (QUERY - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SECOND - 65)) | (1L << (SHOW - 65)) | (1L << (SYS - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (TYPES - 65)) | (1L << (VERIFY - 65)) | (1L << (YEAR - 65)) | (1L << (FUNCTION_ESC - 65)) | (1L << (DATE_ESC - 65)) | (1L << (TIME_ESC - 65)) | (1L << (TIMESTAMP_ESC - 65)) | (1L << (GUID_ESC - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (PARAM - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)))) != 0) || _la==QUOTED_IDENTIFIER || _la==BACKQUOTED_IDENTIFIER) {
 					{
-					setState(321);
+					setState(333);
 					expression();
-					setState(326);
+					setState(338);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 					while (_la==T__2) {
 						{
						{
-						setState(322);
+						setState(334);
 						match(T__2);
-						setState(323);
+						setState(335);
 						expression();
 						}
 						}
-						setState(328);
+						setState(340);
 						_errHandler.sync(this);
 						_la = _input.LA(1);
 					}
 					}
 				}

-				setState(331);
+				setState(343);
 				match(T__1);
 				}
 				break;
 			case 2:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(332);
+				setState(344);
 				expression();
 				}
 				break;
@@ -2107,15 +2131,15 @@ public final NamedQueryContext namedQuery() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(335);
+			setState(347);
 			((NamedQueryContext)_localctx).name = identifier();
-			setState(336);
+			setState(348);
 			match(AS);
-			setState(337);
+			setState(349);
 			match(T__0);
-			setState(338);
+			setState(350);
 			queryNoWith();
-			setState(339);
+			setState(351);
 			match(T__1);
 			}
 		}
@@ -2159,7 +2183,7 @@ public final SetQuantifierContext setQuantifier() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(341);
+			setState(353);
 			_la = _input.LA(1);
 			if ( !(_la==ALL || _la==DISTINCT) ) {
 			_errHandler.recoverInline(this);
@@ -2222,23 +2246,23 @@ public final SelectItemContext selectItem() throws RecognitionException {
 			_localctx = new SelectExpressionContext(_localctx);
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(343);
+			setState(355);
 			expression();
-			setState(348);
+			setState(360);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,42,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,45,_ctx) ) {
 			case 1:
 				{
-				setState(345);
+				setState(357);
 				_la = _input.LA(1);
 				if (_la==AS) {
 					{
-					setState(344);
+					setState(356);
 					match(AS);
 					}
 				}

-				setState(347);
+				setState(359);
 				identifier();
 				}
 				break;
@@ -2292,19 +2316,19 @@ public final RelationContext relation() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(350);
+			setState(362);
 			relationPrimary();
-			setState(354);
+			setState(366);
 			_errHandler.sync(this);
 			_la = _input.LA(1);
-			while (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (FULL - 40)) | (1L << (INNER - 40)) | (1L << (JOIN - 40)) | (1L << (LEFT - 40)) | (1L << (NATURAL - 40)) | (1L << (RIGHT - 40)))) != 0)) {
+			while (((((_la - 41)) & ~0x3f) == 0 && ((1L << (_la - 41)) & ((1L << (FULL - 41)) | (1L << (INNER - 41)) | (1L << (JOIN - 41)) | (1L << (LEFT - 41)) | (1L << (NATURAL - 41)) | (1L << (RIGHT - 41)))) != 0)) {
 				{
				{
-				setState(351);
+				setState(363);
 				joinRelation();
 				}
 				}
-				setState(356);
+				setState(368);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 			}
@@ -2358,7 +2382,7 @@ public final JoinRelationContext joinRelation() throws RecognitionException {
 		enterRule(_localctx, 34, RULE_joinRelation);
 		int _la;
 		try {
-			setState(368);
+			setState(380);
 			switch (_input.LA(1)) {
 			case FULL:
 			case INNER:
@@ -2368,18 +2392,18 @@ public final JoinRelationContext joinRelation() throws RecognitionException {
 				enterOuterAlt(_localctx, 1);
 				{
 				{
-				setState(357);
+				setState(369);
 				joinType();
 				}
-				setState(358);
+				setState(370);
 				match(JOIN);
-				setState(359);
+				setState(371);
 				((JoinRelationContext)_localctx).right = relationPrimary();
-				setState(361);
+				setState(373);
 				_la = _input.LA(1);
 				if (_la==ON || _la==USING) {
 					{
-					setState(360);
+					setState(372);
 					joinCriteria();
 					}
 				}
@@ -2389,13 +2413,13 @@ public final JoinRelationContext joinRelation() throws RecognitionException {
 			case NATURAL:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(363);
+				setState(375);
 				match(NATURAL);
-				setState(364);
+				setState(376);
 				joinType();
-				setState(365);
+				setState(377);
 				match(JOIN);
-				setState(366);
+				setState(378);
 				((JoinRelationContext)_localctx).right = relationPrimary();
 				}
 				break;
@@ -2444,17 +2468,17 @@ public final JoinTypeContext joinType() throws RecognitionException {
 		enterRule(_localctx, 36, RULE_joinType);
 		int _la;
 		try {
-			setState(385);
+			setState(397);
 			switch (_input.LA(1)) {
 			case INNER:
 			case JOIN:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(371);
+				setState(383);
 				_la = _input.LA(1);
 				if (_la==INNER) {
 					{
-					setState(370);
+					setState(382);
 					match(INNER);
 					}
 				}
@@ -2464,13 +2488,13 @@ public final JoinTypeContext joinType() throws RecognitionException {
 			case LEFT:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(373);
+				setState(385);
 				match(LEFT);
-				setState(375);
+				setState(387);
 				_la = _input.LA(1);
 				if (_la==OUTER) {
 					{
-					setState(374);
+					setState(386);
 					match(OUTER);
 					}
 				}
@@ -2480,13 +2504,13 @@ public final JoinTypeContext joinType() throws RecognitionException {
 			case RIGHT:
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(377);
+				setState(389);
 				match(RIGHT);
-				setState(379);
+				setState(391);
 				_la = _input.LA(1);
 				if (_la==OUTER) {
 					{
-					setState(378);
+					setState(390);
 					match(OUTER);
 					}
 				}
@@ -2496,13 +2520,13 @@ public final JoinTypeContext joinType() throws RecognitionException {
 			case FULL:
 				enterOuterAlt(_localctx, 4);
 				{
-				setState(381);
+				setState(393);
 				match(FULL);
-				setState(383);
+				setState(395);
 				_la = _input.LA(1);
 				if (_la==OUTER) {
 					{
-					setState(382);
+					setState(394);
 					match(OUTER);
 					}
 				}
@@ -2560,43 +2584,43 @@ public final JoinCriteriaContext joinCriteria() throws RecognitionException {
 		enterRule(_localctx, 38, RULE_joinCriteria);
 		int _la;
 		try {
-			setState(401);
+			setState(413);
 			switch (_input.LA(1)) {
 			case ON:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(387);
+				setState(399);
 				match(ON);
-				setState(388);
+				setState(400);
 				booleanExpression(0);
 				}
 				break;
 			case USING:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(389);
+				setState(401);
 				match(USING);
-				setState(390);
+				setState(402);
 				match(T__0);
-				setState(391);
+				setState(403);
 				identifier();
-				setState(396);
+				setState(408);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 				while (_la==T__2) {
 					{
					{
-					setState(392);
+					setState(404);
 					match(T__2);
-					setState(393);
+					setState(405);
 					identifier();
 					}
 					}
-					setState(398);
+					setState(410);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 				}
-				setState(399);
+				setState(411);
 				match(T__1);
 				}
 				break;
@@ -2676,6 +2700,7 @@ public static class TableNameContext extends RelationPrimaryContext {
 		public TableIdentifierContext tableIdentifier() {
 			return getRuleContext(TableIdentifierContext.class,0);
 		}
+		public TerminalNode FROZEN() { return getToken(SqlBaseParser.FROZEN, 0); }
 		public QualifiedNameContext qualifiedName() {
 			return getRuleContext(QualifiedNameContext.class,0);
 		}
@@ -2701,30 +2726,39 @@ public final RelationPrimaryContext relationPrimary() throws RecognitionExceptio
 		enterRule(_localctx, 40, RULE_relationPrimary);
 		int _la;
 		try {
-			setState(428);
+			setState(443);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,59,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,63,_ctx) ) {
 			case 1:
 				_localctx = new TableNameContext(_localctx);
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(403);
+				setState(416);
+				_la = _input.LA(1);
+				if (_la==FROZEN) {
+					{
+					setState(415);
+					match(FROZEN);
+					}
+				}
+
+				setState(418);
 				tableIdentifier();
-				setState(408);
+				setState(423);
 				_errHandler.sync(this);
-				switch ( getInterpreter().adaptivePredict(_input,54,_ctx) ) {
+				switch ( getInterpreter().adaptivePredict(_input,58,_ctx) ) {
 				case 1:
 					{
-					setState(405);
+					setState(420);
 					_la = _input.LA(1);
 					if (_la==AS) {
 						{
-						setState(404);
+						setState(419);
 						match(AS);
 						}
 					}

-					setState(407);
+					setState(422);
 					qualifiedName();
 					}
 					break;
@@ -2735,27 +2769,27 @@ public final RelationPrimaryContext relationPrimary() throws RecognitionExceptio
 				_localctx = new AliasedQueryContext(_localctx);
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(410);
+				setState(425);
 				match(T__0);
-				setState(411);
+				setState(426);
 				queryNoWith();
-				setState(412);
+				setState(427);
 				match(T__1);
-				setState(417);
+				setState(432);
 				_errHandler.sync(this);
-				switch ( getInterpreter().adaptivePredict(_input,56,_ctx) ) {
+				switch ( getInterpreter().adaptivePredict(_input,60,_ctx) ) {
 				case 1:
 					{
-					setState(414);
+					setState(429);
 					_la = _input.LA(1);
 					if (_la==AS) {
 						{
-						setState(413);
+						setState(428);
 						match(AS);
 						}
 					}

-					setState(416);
+					setState(431);
 					qualifiedName();
 					}
 					break;
@@ -2766,27 +2800,27 @@ public final RelationPrimaryContext relationPrimary() throws RecognitionExceptio
 				_localctx = new AliasedRelationContext(_localctx);
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(419);
+				setState(434);
 				match(T__0);
-				setState(420);
+				setState(435);
 				relation();
-				setState(421);
+				setState(436);
 				match(T__1);
-				setState(426);
+				setState(441);
 				_errHandler.sync(this);
-				switch ( getInterpreter().adaptivePredict(_input,58,_ctx) ) {
+				switch ( getInterpreter().adaptivePredict(_input,62,_ctx) ) {
 				case 1:
 					{
-					setState(423);
+					setState(438);
 					_la = _input.LA(1);
 					if (_la==AS) {
 						{
-						setState(422);
+						setState(437);
 						match(AS);
 						}
 					}

-					setState(425);
+					setState(440);
 					qualifiedName();
 					}
 					break;
@@ -2835,7 +2869,7 @@ public final ExpressionContext expression() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(430);
+			setState(445);
 			booleanExpression(0);
 			}
 		}
@@ -3043,18 +3077,18 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 			int _alt;
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(463);
+			setState(478);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,60,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) {
 			case 1:
 				{
 				_localctx = new LogicalNotContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;

-				setState(433);
+				setState(448);
 				match(NOT);
-				setState(434);
+				setState(449);
 				booleanExpression(8);
 				}
 				break;
@@ -3063,13 +3097,13 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 				_localctx = new ExistsContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(435);
+				setState(450);
 				match(EXISTS);
-				setState(436);
+				setState(451);
 				match(T__0);
-				setState(437);
+				setState(452);
 				query();
-				setState(438);
+				setState(453);
 				match(T__1);
 				}
 				break;
@@ -3078,15 +3112,15 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 				_localctx = new StringQueryContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(440);
+				setState(455);
 				match(QUERY);
-				setState(441);
+				setState(456);
 				match(T__0);
-				setState(442);
+				setState(457);
 				((StringQueryContext)_localctx).queryString = string();
-				setState(443);
+				setState(458);
 				matchQueryOptions();
-				setState(444);
+				setState(459);
 				match(T__1);
 				}
 				break;
@@ -3095,19 +3129,19 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 				_localctx = new MatchQueryContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(446);
+				setState(461);
 				match(MATCH);
-				setState(447);
+				setState(462);
 				match(T__0);
-				setState(448);
+				setState(463);
 				((MatchQueryContext)_localctx).singleField = qualifiedName();
-				setState(449);
+				setState(464);
 				match(T__2);
-				setState(450);
+				setState(465);
 				((MatchQueryContext)_localctx).queryString = string();
-				setState(451);
+				setState(466);
 				matchQueryOptions();
-				setState(452);
+				setState(467);
 				match(T__1);
 				}
 				break;
@@ -3116,19 +3150,19 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 				_localctx = new MultiMatchQueryContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(454);
+				setState(469);
 				match(MATCH);
-				setState(455);
+				setState(470);
 				match(T__0);
-				setState(456);
+				setState(471);
 				((MultiMatchQueryContext)_localctx).multiFields = string();
-				setState(457);
+				setState(472);
 				match(T__2);
-				setState(458);
+				setState(473);
 				((MultiMatchQueryContext)_localctx).queryString = string();
-				setState(459);
+				setState(474);
 				matchQueryOptions();
-				setState(460);
+				setState(475);
 				match(T__1);
 				}
 				break;
@@ -3137,33 +3171,33 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 				_localctx = new BooleanDefaultContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(462);
+				setState(477);
 				predicated();
 				}
 				break;
 			}
 			_ctx.stop = _input.LT(-1);
-			setState(473);
+			setState(488);
 			_errHandler.sync(this);
-			_alt = getInterpreter().adaptivePredict(_input,62,_ctx);
+			_alt = getInterpreter().adaptivePredict(_input,66,_ctx);
 			while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
 				if ( _alt==1 ) {
 					if ( _parseListeners!=null ) triggerExitRuleEvent();
 					_prevctx = _localctx;
 					{
-					setState(471);
+					setState(486);
 					_errHandler.sync(this);
-					switch ( getInterpreter().adaptivePredict(_input,61,_ctx) ) {
+					switch ( getInterpreter().adaptivePredict(_input,65,_ctx) ) {
 					case 1:
 						{
 						_localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
 						((LogicalBinaryContext)_localctx).left = _prevctx;
 						pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
-						setState(465);
+						setState(480);
 						if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)");
-						setState(466);
+						setState(481);
 						((LogicalBinaryContext)_localctx).operator = match(AND);
-						setState(467);
+						setState(482);
 						((LogicalBinaryContext)_localctx).right = booleanExpression(3);
 						}
 						break;
@@ -3172,20 +3206,20 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
 						_localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
 						((LogicalBinaryContext)_localctx).left = _prevctx;
 						pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
-						setState(468);
+						setState(483);
 						if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
-						setState(469);
+						setState(484);
 						((LogicalBinaryContext)_localctx).operator = match(OR);
-						setState(470);
+						setState(485);
 						((LogicalBinaryContext)_localctx).right = booleanExpression(2);
 						}
 						break;
 					}
 					}
 				}
-				setState(475);
+				setState(490);
 				_errHandler.sync(this);
-				_alt = getInterpreter().adaptivePredict(_input,62,_ctx);
+				_alt = getInterpreter().adaptivePredict(_input,66,_ctx);
 			}
 		}
 	}
@@ -3233,19 +3267,19 @@ public final MatchQueryOptionsContext matchQueryOptions() throws RecognitionExce
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(480);
+			setState(495);
 			_errHandler.sync(this);
 			_la = _input.LA(1);
 			while (_la==T__2) {
 				{
				{
-				setState(476);
+				setState(491);
 				match(T__2);
-				setState(477);
+				setState(492);
 				string();
 				}
 				}
-				setState(482);
+				setState(497);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 			}
@@ -3294,14 +3328,14 @@ public final PredicatedContext predicated() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(483);
+			setState(498);
 			valueExpression(0);
-			setState(485);
+			setState(500);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,68,_ctx) ) {
 			case 1:
 				{
-				setState(484);
+				setState(499);
 				predicate();
 				}
 				break;
@@ -3371,142 +3405,142 @@ public final PredicateContext predicate() throws RecognitionException {
 		enterRule(_localctx, 50, RULE_predicate);
 		int _la;
 		try {
-			setState(533);
+			setState(548);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,76,_ctx) ) {
 			case 1:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(488);
+				setState(503);
 				_la = _input.LA(1);
 				if (_la==NOT) {
 					{
-					setState(487);
+					setState(502);
 					match(NOT);
 					}
 				}

-				setState(490);
+				setState(505);
 				((PredicateContext)_localctx).kind = match(BETWEEN);
-				setState(491);
+				setState(506);
 				((PredicateContext)_localctx).lower = valueExpression(0);
-				setState(492);
+				setState(507);
 				match(AND);
-				setState(493);
+				setState(508);
 				((PredicateContext)_localctx).upper = valueExpression(0);
 				}
 				break;
 			case 2:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(496);
+				setState(511);
 				_la = _input.LA(1);
 				if (_la==NOT) {
 					{
-					setState(495);
+					setState(510);
 					match(NOT);
 					}
 				}

-				setState(498);
+				setState(513);
 				((PredicateContext)_localctx).kind = match(IN);
-				setState(499);
+				setState(514);
 				match(T__0);
-				setState(500);
+				setState(515);
 				valueExpression(0);
-				setState(505);
+				setState(520);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 				while (_la==T__2) {
 					{
					{
-					setState(501);
+					setState(516);
 					match(T__2);
-					setState(502);
+					setState(517);
 					valueExpression(0);
 					}
 					}
-					setState(507);
+					setState(522);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 				}
-				setState(508);
+				setState(523);
 				match(T__1);
 				}
 				break;
 			case 3:
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(511);
+				setState(526);
 				_la = _input.LA(1);
 				if (_la==NOT) {
 					{
-					setState(510);
+					setState(525);
 					match(NOT);
 					}
 				}

-				setState(513);
+				setState(528);
 				((PredicateContext)_localctx).kind = match(IN);
-				setState(514);
+				setState(529);
 				match(T__0);
-				setState(515);
+				setState(530);
 				query();
-				setState(516);
+				setState(531);
 				match(T__1);
 				}
 				break;
 			case 4:
 				enterOuterAlt(_localctx, 4);
 				{
-				setState(519);
+				setState(534);
 				_la = _input.LA(1);
 				if (_la==NOT) {
 					{
-					setState(518);
+					setState(533);
 					match(NOT);
 					}
 				}

-				setState(521);
+				setState(536);
 				((PredicateContext)_localctx).kind = match(LIKE);
-				setState(522);
+				setState(537);
 				pattern();
 				}
 				break;
 			case 5:
 				enterOuterAlt(_localctx, 5);
 				{
-				setState(524);
+				setState(539);
 				_la = _input.LA(1);
 				if (_la==NOT) {
 					{
-					setState(523);
+					setState(538);
 					match(NOT);
 					}
 				}

-				setState(526);
+				setState(541);
 				((PredicateContext)_localctx).kind = match(RLIKE);
-				setState(527);
+				setState(542);
 				((PredicateContext)_localctx).regex = string();
 				}
 				break;
 			case 6:
 				enterOuterAlt(_localctx, 6);
 				{
-				setState(528);
+				setState(543);
 				match(IS);
-				setState(530);
+				setState(545);
 				_la = _input.LA(1);
 				if (_la==NOT) {
 					{
-					setState(529);
+					setState(544);
 					match(NOT);
 					}
 				}

-				setState(532);
+				setState(547);
 				((PredicateContext)_localctx).kind = match(NULL);
 				}
 				break;
@@ -3553,9 +3587,9 @@ public final LikePatternContext likePattern() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(535);
+			setState(550);
 			match(LIKE);
-			setState(536);
+			setState(551);
 			pattern();
 			}
 		}
@@ -3603,14 +3637,14 @@ public final PatternContext pattern() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(538);
+			setState(553);
 			((PatternContext)_localctx).value = string();
-			setState(540);
+			setState(555);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,73,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,77,_ctx) ) {
 			case 1:
 				{
-				setState(539);
+				setState(554);
 				patternEscape();
 				}
 				break;
@@ -3658,25 +3692,25 @@ public final PatternEscapeContext patternEscape() throws RecognitionException {
 		PatternEscapeContext _localctx = new PatternEscapeContext(_ctx, getState());
 		enterRule(_localctx, 56, RULE_patternEscape);
 		try {
-			setState(548);
+			setState(563);
 			switch (_input.LA(1)) {
 			case ESCAPE:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(542);
+				setState(557);
 				match(ESCAPE);
-				setState(543);
+				setState(558);
 				((PatternEscapeContext)_localctx).escape = string();
 				}
 				break;
 			case ESCAPE_ESC:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(544);
+				setState(559);
 				match(ESCAPE_ESC);
-				setState(545);
+				setState(560);
 				((PatternEscapeContext)_localctx).escape = string();
-				setState(546);
+				setState(561);
 				match(ESC_END);
 				}
 				break;
@@ -3821,7 +3855,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 			int _alt;
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(554);
+			setState(569);
 			switch (_input.LA(1)) {
 			case T__0:
 			case ANALYZE:
@@ -3891,7 +3925,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 				_ctx = _localctx;
 				_prevctx = _localctx;

-				setState(551);
+				setState(566);
 				primaryExpression(0);
 				}
 				break;
@@ -3901,7 +3935,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 				_localctx = new ArithmeticUnaryContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(552);
+				setState(567);
 				((ArithmeticUnaryContext)_localctx).operator = _input.LT(1);
 				_la = _input.LA(1);
 				if ( !(_la==PLUS || _la==MINUS) ) {
@@ -3909,7 +3943,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 				} else {
 					consume();
 				}
-				setState(553);
+				setState(568);
 				valueExpression(4);
 				}
 				break;
@@ -3917,33 +3951,33 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 				throw new NoViableAltException(this);
 			}
 			_ctx.stop = _input.LT(-1);
-			setState(568);
+			setState(583);
 			_errHandler.sync(this);
-			_alt = getInterpreter().adaptivePredict(_input,77,_ctx);
+			_alt = getInterpreter().adaptivePredict(_input,81,_ctx);
 			while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
 				if ( _alt==1 ) {
 					if ( _parseListeners!=null ) triggerExitRuleEvent();
 					_prevctx = _localctx;
 					{
-					setState(566);
+					setState(581);
 					_errHandler.sync(this);
-					switch ( getInterpreter().adaptivePredict(_input,76,_ctx) ) {
+					switch ( getInterpreter().adaptivePredict(_input,80,_ctx) ) {
 					case 1:
 						{
 						_localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState));
 						((ArithmeticBinaryContext)_localctx).left = _prevctx;
 						pushNewRecursionContext(_localctx, _startState, RULE_valueExpression);
-						setState(556);
+						setState(571);
 						if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)");
-						setState(557);
+						setState(572);
 						((ArithmeticBinaryContext)_localctx).operator = _input.LT(1);
 						_la = _input.LA(1);
-						if ( !(((((_la - 115)) & ~0x3f) == 0 && ((1L << (_la - 115)) & ((1L << (ASTERISK - 115)) | (1L << (SLASH - 115)) | (1L << (PERCENT - 115)))) != 0)) ) {
+						if ( !(((((_la - 117)) & ~0x3f) == 0 && ((1L << (_la - 117)) & ((1L << (ASTERISK - 117)) | (1L << (SLASH - 117)) | (1L << (PERCENT - 117)))) != 0)) ) {
 						((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this);
 						} else {
 							consume();
 						}
-						setState(558);
+						setState(573);
 						((ArithmeticBinaryContext)_localctx).right = valueExpression(4);
 						}
 						break;
@@ -3952,9 +3986,9 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 						_localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState));
 						((ArithmeticBinaryContext)_localctx).left = _prevctx;
 						pushNewRecursionContext(_localctx, _startState, RULE_valueExpression);
-						setState(559);
+						setState(574);
 						if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)");
-						setState(560);
+						setState(575);
 						((ArithmeticBinaryContext)_localctx).operator = _input.LT(1);
 						_la = _input.LA(1);
 						if ( !(_la==PLUS || _la==MINUS) ) {
@@ -3962,7 +3996,7 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 						} else {
 							consume();
 						}
-						setState(561);
+						setState(576);
 						((ArithmeticBinaryContext)_localctx).right = valueExpression(3);
 						}
 						break;
@@ -3971,20 +4005,20 @@ private ValueExpressionContext valueExpression(int _p) throws RecognitionExcepti
 						_localctx = new ComparisonContext(new ValueExpressionContext(_parentctx, _parentState));
 						((ComparisonContext)_localctx).left = _prevctx;
 						pushNewRecursionContext(_localctx, _startState, RULE_valueExpression);
-						setState(562);
+						setState(577);
 						if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
-						setState(563);
+						setState(578);
 						comparisonOperator();
-						setState(564);
+						setState(579);
 						((ComparisonContext)_localctx).right = valueExpression(2);
 						}
 						break;
 					}
 					}
 				}
-				setState(570);
+				setState(585);
 				_errHandler.sync(this);
-				_alt = getInterpreter().adaptivePredict(_input,77,_ctx);
+				_alt = getInterpreter().adaptivePredict(_input,81,_ctx);
 			}
 		}
 	}
@@ -4256,16 +4290,16 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 			int _alt;
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(607);
+			setState(622);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,82,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,86,_ctx) ) {
 			case 1:
 				{
 				_localctx = new CastContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;

-				setState(572);
+				setState(587);
 				castExpression();
 				}
 				break;
@@ -4274,7 +4308,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new ExtractContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(573);
+				setState(588);
 				extractExpression();
 				}
 				break;
@@ -4283,7 +4317,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new CurrentDateTimeFunctionContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(574);
+				setState(589);
 				builtinDateTimeFunction();
 				}
 				break;
@@ -4292,7 +4326,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new ConstantDefaultContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(575);
+				setState(590);
 				constant();
 				}
 				break;
@@ -4301,18 +4335,18 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new StarContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(579);
+				setState(594);
 				_la = _input.LA(1);
-				if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (OPTIMIZED - 67)) | (1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) {
+				if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OPTIMIZED - 69)) | (1L << (PARSED - 69)) | (1L << (PHYSICAL - 69)) | (1L << (PLAN - 69)) | (1L << (RLIKE - 69)) | (1L << (QUERY - 69)) | (1L << (SCHEMAS - 69)) | (1L << (SECOND - 69)) | (1L << (SHOW - 69)) | (1L << (SYS - 69)) | (1L << (TABLES - 69)) | (1L << (TEXT - 69)) | (1L << (TYPE - 69)) | (1L << (TYPES - 69)) | (1L << (VERIFY - 69)) | (1L << (YEAR - 69)) | (1L << (IDENTIFIER - 69)) | (1L << (DIGIT_IDENTIFIER - 69)) | (1L << (QUOTED_IDENTIFIER - 69)) | (1L << (BACKQUOTED_IDENTIFIER - 69)))) != 0)) {
 					{
-					setState(576);
+					setState(591);
 					qualifiedName();
-					setState(577);
+					setState(592);
 					match(DOT);
 					}
 				}

-				setState(581);
+				setState(596);
 				match(ASTERISK);
 				}
 				break;
@@ -4321,7 +4355,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new FunctionContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(582);
+				setState(597);
 				functionExpression();
 				}
 				break;
@@ -4330,11 +4364,11 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new SubqueryExpressionContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(583);
+				setState(598);
 				match(T__0);
-				setState(584);
+				setState(599);
 				query();
-				setState(585);
+				setState(600);
 				match(T__1);
 				}
 				break;
@@ -4343,7 +4377,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new DereferenceContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(587);
+				setState(602);
 				qualifiedName();
 				}
 				break;
@@ -4352,11 +4386,11 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new ParenthesizedExpressionContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(588);
+				setState(603);
 				match(T__0);
-				setState(589);
+				setState(604);
 				expression();
-				setState(590);
+				setState(605);
 				match(T__1);
 				}
 				break;
@@ -4365,51 +4399,51 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 				_localctx = new CaseContext(_localctx);
 				_ctx = _localctx;
 				_prevctx = _localctx;
-				setState(592);
+				setState(607);
 				match(CASE);
-				setState(594);
+				setState(609);
 				_la = _input.LA(1);
-				if (((((_la - 1)) & ~0x3f) == 0 && ((1L << (_la - 1)) & ((1L << (T__0 - 1)) | (1L << (ANALYZE - 1)) | (1L << (ANALYZED - 1)) | (1L << (CASE - 1)) | (1L << (CAST - 1)) | (1L << (CATALOGS - 1)) | (1L << (COLUMNS - 1)) | (1L << (CONVERT - 1)) | (1L << (CURRENT_DATE - 1)) | (1L << (CURRENT_TIME - 1)) | (1L << (CURRENT_TIMESTAMP - 1)) | (1L << (DAY - 1)) | (1L << (DEBUG - 1)) | (1L << (EXECUTABLE - 1)) | (1L << (EXISTS - 1)) | (1L << (EXPLAIN - 1)) | (1L << (EXTRACT - 1)) | (1L << (FALSE - 1)) | (1L << (FIRST - 1)) | (1L << (FORMAT - 1)) | (1L << (FULL - 1)) | (1L << (FUNCTIONS - 1)) | (1L << (GRAPHVIZ - 1)) | (1L << (HOUR - 1)) | (1L << (INTERVAL - 1)) | (1L << (LAST - 1)) | (1L << (LEFT - 1)) | (1L << (LIMIT - 1)) | (1L << (MAPPED - 1)) | (1L << (MATCH - 1)) | (1L << (MINUTE - 1)) | (1L << (MONTH - 1)) | (1L << (NOT - 1)) | (1L << (NULL - 1)))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (OPTIMIZED - 67)) | (1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RIGHT - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TRUE - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (FUNCTION_ESC - 67)) | (1L << (DATE_ESC - 67)) | (1L << (TIME_ESC - 67)) | (1L << (TIMESTAMP_ESC - 67)) | (1L << (GUID_ESC - 67)) | (1L << (PLUS - 67)) | (1L << (MINUS - 67)) | (1L << (ASTERISK - 67)) | (1L << (PARAM - 67)) | (1L << (STRING - 67)) | (1L << (INTEGER_VALUE - 67)) | (1L << (DECIMAL_VALUE - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) {
+				if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (NOT - 65)) | (1L << (NULL - 65)) | (1L << (OPTIMIZED - 65)) | (1L << (PARSED - 65)) | (1L << (PHYSICAL - 65)) | (1L << (PLAN - 65)) | (1L << (RIGHT - 65)) | (1L << (RLIKE - 65)) | (1L << (QUERY - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SECOND - 65)) | (1L << (SHOW - 65)) | (1L << (SYS - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (TYPES - 65)) | (1L << (VERIFY - 65)) | (1L << (YEAR - 65)) | (1L << (FUNCTION_ESC - 65)) | (1L << (DATE_ESC - 65)) | (1L << (TIME_ESC - 65)) | (1L << (TIMESTAMP_ESC - 65)) | (1L << (GUID_ESC - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (PARAM - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)))) != 0) || _la==QUOTED_IDENTIFIER || _la==BACKQUOTED_IDENTIFIER) {
 					{
-					setState(593);
+					setState(608);
 					((CaseContext)_localctx).operand = booleanExpression(0);
 					}
 				}

-				setState(597);
+				setState(612);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 				do {
 					{
					{
-					setState(596);
+					setState(611);
 					whenClause();
 					}
 					}
-					setState(599);
+					setState(614);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 				} while ( _la==WHEN );
-				setState(603);
+				setState(618);
 				_la = _input.LA(1);
 				if (_la==ELSE) {
 					{
-					setState(601);
+					setState(616);
 					match(ELSE);
-					setState(602);
+					setState(617);
 					((CaseContext)_localctx).elseClause = booleanExpression(0);
 					}
 				}

-				setState(605);
+				setState(620);
 				match(END);
 				}
 				break;
 			}
 			_ctx.stop = _input.LT(-1);
-			setState(614);
+			setState(629);
 			_errHandler.sync(this);
-			_alt = getInterpreter().adaptivePredict(_input,83,_ctx);
+			_alt = getInterpreter().adaptivePredict(_input,87,_ctx);
 			while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
 				if ( _alt==1 ) {
 					if ( _parseListeners!=null ) triggerExitRuleEvent();
@@ -4418,18 +4452,18 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
 					{
 					_localctx = new CastOperatorExpressionContext(new PrimaryExpressionContext(_parentctx, _parentState));
 					pushNewRecursionContext(_localctx, _startState, RULE_primaryExpression);
-					setState(609);
+					setState(624);
 					if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)");
-					setState(610);
+					setState(625);
 					match(CAST_OP);
-					setState(611);
+					setState(626);
 					dataType();
 					}
 					}
 				}
-				setState(616);
+				setState(631);
 				_errHandler.sync(this);
-				_alt = getInterpreter().adaptivePredict(_input,83,_ctx);
+				_alt = getInterpreter().adaptivePredict(_input,87,_ctx);
 			}
 		}
 	}
@@ -4472,26 +4506,26 @@ public final BuiltinDateTimeFunctionContext builtinDateTimeFunction() throws Rec
 		BuiltinDateTimeFunctionContext _localctx = new BuiltinDateTimeFunctionContext(_ctx, getState());
 		enterRule(_localctx, 62, RULE_builtinDateTimeFunction);
 		try {
-			setState(620);
+			setState(635);
 			switch (_input.LA(1)) {
 			case CURRENT_TIMESTAMP:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(617);
+				setState(632);
 				((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIMESTAMP);
 				}
 				break;
 			case CURRENT_DATE:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(618);
+				setState(633);
 				((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_DATE);
 				}
 				break;
 			case CURRENT_TIME:
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(619);
+				setState(634);
 				((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIME);
 				}
 				break;
@@ -4542,42 +4576,42 @@ public final CastExpressionContext castExpression() throws RecognitionException
 		CastExpressionContext _localctx = new CastExpressionContext(_ctx, getState());
 		enterRule(_localctx, 64, RULE_castExpression);
 		try {
-			setState(632);
+			setState(647);
 			_errHandler.sync(this);
-			switch ( getInterpreter().adaptivePredict(_input,85,_ctx) ) {
+			switch ( getInterpreter().adaptivePredict(_input,89,_ctx) ) {
 			case 1:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(622);
+				setState(637);
 				castTemplate();
 				}
 				break;
 			case 2:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(623);
+				setState(638);
 				match(FUNCTION_ESC);
-				setState(624);
+				setState(639);
 				castTemplate();
-				setState(625);
+				setState(640);
 				match(ESC_END);
 				}
 				break;
 			case 3:
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(627);
+				setState(642);
 				convertTemplate();
 				}
 				break;
 			case 4:
 				enterOuterAlt(_localctx, 4);
 				{
-				setState(628);
+				setState(643);
 				match(FUNCTION_ESC);
-				setState(629);
+				setState(644);
 				convertTemplate();
-				setState(630);
+				setState(645);
 				match(ESC_END);
 				}
 				break;
@@ -4628,17 +4662,17 @@ public final CastTemplateContext castTemplate() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(634);
+			setState(649);
 			match(CAST);
-			setState(635);
+			setState(650);
 			match(T__0);
-			setState(636);
+			setState(651);
 			expression();
-			setState(637);
+			setState(652);
 			match(AS);
-			setState(638);
+			setState(653);
 			dataType();
-			setState(639);
+			setState(654);
 			match(T__1);
 			}
 		}
@@ -4686,17 +4720,17 @@ public final ConvertTemplateContext convertTemplate() throws RecognitionExceptio
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(641);
+			setState(656);
 			match(CONVERT);
-			setState(642);
+			setState(657);
 			match(T__0);
-			setState(643);
+			setState(658);
 			expression();
-			setState(644);
+			setState(659);
 			match(T__2);
-			setState(645);
+			setState(660);
 			dataType();
-			setState(646);
+			setState(661);
 			match(T__1);
 			}
 		}
@@ -4740,23 +4774,23 @@ public final ExtractExpressionContext extractExpression() throws RecognitionExce
 		ExtractExpressionContext _localctx = new ExtractExpressionContext(_ctx, getState());
 		enterRule(_localctx, 70, RULE_extractExpression);
 		try {
-			setState(653);
+			setState(668);
 			switch (_input.LA(1)) {
 			case EXTRACT:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(648);
+				setState(663);
 				extractTemplate();
 				}
 				break;
 			case FUNCTION_ESC:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(649);
+				setState(664);
 				match(FUNCTION_ESC);
-				setState(650);
+				setState(665);
 				extractTemplate();
-				setState(651);
+				setState(666);
 				match(ESC_END);
 				}
 				break;
@@ -4810,17 +4844,17 @@ public final ExtractTemplateContext extractTemplate() throws RecognitionExceptio
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(655);
+			setState(670);
 			match(EXTRACT);
-			setState(656);
+			setState(671);
 			match(T__0);
-			setState(657);
+			setState(672);
 			((ExtractTemplateContext)_localctx).field = identifier();
-			setState(658);
+			setState(673);
 			match(FROM);
-			setState(659);
+			setState(674);
 			valueExpression(0);
-			setState(660);
+			setState(675);
 			match(T__1);
 			}
 		}
@@ -4863,7 +4897,7 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx
 		FunctionExpressionContext _localctx = new FunctionExpressionContext(_ctx, getState());
 		enterRule(_localctx, 74, RULE_functionExpression);
 		try {
-			setState(667);
+			setState(682);
 			switch (_input.LA(1)) {
 			case ANALYZE:
 			case ANALYZED:
@@ -4912,18 +4946,18 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx
 			case BACKQUOTED_IDENTIFIER:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(662);
+				setState(677);
 				functionTemplate();
 				}
 				break;
 			case FUNCTION_ESC:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(663);
+				setState(678);
 				match(FUNCTION_ESC);
-				setState(664);
+				setState(679);
 				functionTemplate();
-				setState(665);
+				setState(680);
 				match(ESC_END);
 				}
 				break;
@@ -4981,45 +5015,45 @@ public final FunctionTemplateContext functionTemplate() throws RecognitionExcept
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(669);
+			setState(684);
 			functionName();
-			setState(670);
+			setState(685);
 			match(T__0);
-			setState(682);
+			setState(697);
 			_la = _input.LA(1);
-			if (((((_la - 1)) & ~0x3f) == 0 && ((1L << (_la - 1)) & ((1L << (T__0 - 1)) | (1L << (ALL - 1)) | (1L << (ANALYZE - 1)) | (1L << (ANALYZED - 1)) | (1L << (CASE - 1)) | (1L << (CAST - 1)) | (1L << (CATALOGS - 1)) | (1L << (COLUMNS - 1)) | (1L << (CONVERT - 1)) | (1L << (CURRENT_DATE - 1)) | (1L << (CURRENT_TIME - 1)) | (1L << (CURRENT_TIMESTAMP - 1)) | (1L << (DAY - 1)) | (1L << (DEBUG - 1)) | (1L << (DISTINCT - 1)) | (1L << (EXECUTABLE - 1)) | (1L << (EXISTS - 1)) | (1L << (EXPLAIN - 1)) | (1L << (EXTRACT - 1)) | (1L << (FALSE - 1)) | (1L << (FIRST - 1)) | (1L << (FORMAT - 1)) | (1L << (FULL - 1)) | (1L << (FUNCTIONS - 1)) | (1L << (GRAPHVIZ - 1)) | (1L << (HOUR - 1)) | (1L << (INTERVAL - 1)) | (1L << (LAST - 1)) | (1L << (LEFT - 1)) | (1L << (LIMIT - 1)) | (1L << (MAPPED - 1)) | (1L << (MATCH - 1)) | (1L << (MINUTE - 1)) | (1L << (MONTH - 1)) | (1L << (NOT - 1)) | (1L << (NULL - 1)))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (OPTIMIZED - 67)) | (1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RIGHT - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TRUE - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (FUNCTION_ESC - 67)) | (1L << (DATE_ESC - 67)) | (1L << (TIME_ESC - 67)) | (1L << (TIMESTAMP_ESC - 67)) | (1L << (GUID_ESC - 67)) | (1L << (PLUS - 67)) | (1L << (MINUS - 67)) | (1L << (ASTERISK - 67)) | (1L << (PARAM - 67)) | (1L << (STRING - 67)) | (1L << (INTEGER_VALUE - 67)) | (1L << (DECIMAL_VALUE - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) {
+			if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 65)) & ~0x3f) == 0 && ((1L << (_la - 65)) & ((1L << (NOT - 65)) | (1L << (NULL - 65)) | (1L << (OPTIMIZED - 65)) | (1L << (PARSED - 65)) | (1L << (PHYSICAL - 65)) | (1L << (PLAN - 65)) | (1L << (RIGHT - 65)) | (1L << (RLIKE - 65)) | (1L << (QUERY - 65)) | (1L << (SCHEMAS - 65)) | (1L << (SECOND - 65)) | (1L << (SHOW - 65)) | (1L << (SYS - 65)) | (1L << (TABLES - 65)) | (1L << (TEXT - 65)) | (1L << (TRUE - 65)) | (1L << (TYPE - 65)) | (1L << (TYPES - 65)) | (1L << (VERIFY - 65)) | (1L << (YEAR - 65)) | (1L << (FUNCTION_ESC - 65)) | (1L << (DATE_ESC - 65)) | (1L << (TIME_ESC - 65)) | (1L << (TIMESTAMP_ESC - 65)) | (1L << (GUID_ESC - 65)) | (1L << (PLUS - 65)) | (1L << (MINUS - 65)) | (1L << (ASTERISK - 65)) | (1L << (PARAM - 65)) | (1L << (STRING - 65)) | (1L << (INTEGER_VALUE - 65)) | (1L << (DECIMAL_VALUE - 65)) | (1L << (IDENTIFIER - 65)) | (1L << (DIGIT_IDENTIFIER - 65)))) != 0) || _la==QUOTED_IDENTIFIER || _la==BACKQUOTED_IDENTIFIER) {
 				{
-				setState(672);
+				setState(687);
 				_la = _input.LA(1);
 				if (_la==ALL || _la==DISTINCT) {
 					{
-					setState(671);
+					setState(686);
 					setQuantifier();
 					}
 				}

-				setState(674);
+				setState(689);
 				expression();
-				setState(679);
+				setState(694);
 				_errHandler.sync(this);
 				_la = _input.LA(1);
 				while (_la==T__2) {
 					{
					{
-					setState(675);
+					setState(690);
 					match(T__2);
-					setState(676);
+					setState(691);
 					expression();
 					}
 					}
-					setState(681);
+					setState(696);
 					_errHandler.sync(this);
 					_la = _input.LA(1);
 				}
 				}
 			}

-			setState(684);
+			setState(699);
 			match(T__1);
 			}
 		}
@@ -5063,19 +5097,19 @@ public final FunctionNameContext functionName() throws RecognitionException {
 		FunctionNameContext _localctx = new FunctionNameContext(_ctx, getState());
 		enterRule(_localctx, 78, RULE_functionName);
 		try {
-			setState(689);
+			setState(704);
 			switch (_input.LA(1)) {
 			case LEFT:
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(686);
+				setState(701);
 				match(LEFT);
 				}
 				break;
 			case RIGHT:
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(687);
+				setState(702);
 				match(RIGHT);
 				}
 				break;
@@ -5124,7 +5158,7 @@ public final FunctionNameContext functionName() throws RecognitionException {
 			case BACKQUOTED_IDENTIFIER:
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(688);
+				setState(703);
 				identifier();
 				}
 				break;
@@ -5355,13 +5389,13 @@ public final ConstantContext constant() throws RecognitionException {
 		enterRule(_localctx, 80, RULE_constant);
 		try {
 			int _alt;
-			setState(717);
+			setState(732);
 			switch (_input.LA(1)) {
 			case NULL:
 				_localctx = new NullLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 1);
 				{
-				setState(691);
+				setState(706);
 				match(NULL);
 				}
 				break;
@@ -5369,7 +5403,7 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new IntervalLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 2);
 				{
-				setState(692);
+				setState(707);
 				interval();
 				}
 				break;
@@ -5378,7 +5412,7 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new NumericLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 3);
 				{
-				setState(693);
+				setState(708);
 				number();
 				}
 				break;
@@ -5387,7 +5421,7 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new BooleanLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 4);
 				{
-				setState(694);
+				setState(709);
 				booleanValue();
 				}
 				break;
@@ -5395,7 +5429,7 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new StringLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 5);
 				{
-				setState(696);
+				setState(711);
 				_errHandler.sync(this);
 				_alt = 1;
 				do {
@@ -5403,7 +5437,7 @@ public final ConstantContext constant() throws RecognitionException {
 					case 1:
 						{
						{
-						setState(695);
+						setState(710);
 						match(STRING);
 						}
 						}
@@ -5411,9 +5445,9 @@ public final ConstantContext constant() throws RecognitionException {
 					default:
 						throw new NoViableAltException(this);
 					}
-					setState(698);
+					setState(713);
 					_errHandler.sync(this);
-					_alt = getInterpreter().adaptivePredict(_input,92,_ctx);
+					_alt = getInterpreter().adaptivePredict(_input,96,_ctx);
 				} while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER );
 				}
 				break;
@@ -5421,7 +5455,7 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new ParamLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 6);
 				{
-				setState(700);
+				setState(715);
 				match(PARAM);
 				}
 				break;
@@ -5429,11 +5463,11 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new DateEscapedLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 7);
 				{
-				setState(701);
+				setState(716);
 				match(DATE_ESC);
-				setState(702);
+				setState(717);
 				string();
-				setState(703);
+				setState(718);
 				match(ESC_END);
 				}
 				break;
@@ -5441,11 +5475,11 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new TimeEscapedLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 8);
 				{
-				setState(705);
+				setState(720);
 				match(TIME_ESC);
-				setState(706);
+				setState(721);
 				string();
-				setState(707);
+				setState(722);
 				match(ESC_END);
 				}
 				break;
@@ -5453,11 +5487,11 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new TimestampEscapedLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 9);
 				{
-				setState(709);
+				setState(724);
 				match(TIMESTAMP_ESC);
-				setState(710);
+				setState(725);
 				string();
-				setState(711);
+				setState(726);
 				match(ESC_END);
 				}
 				break;
@@ -5465,11 +5499,11 @@ public final ConstantContext constant() throws RecognitionException {
 				_localctx = new GuidEscapedLiteralContext(_localctx);
 				enterOuterAlt(_localctx, 10);
 				{
-				setState(713);
+				setState(728);
 				match(GUID_ESC);
-				setState(714);
+				setState(729);
 				string();
-				setState(715);
+				setState(730);
 				match(ESC_END);
 				}
 				break;
@@ -5522,9 +5556,9 @@ public final ComparisonOperatorContext comparisonOperator() throws RecognitionEx
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(719);
+			setState(734);
 			_la = _input.LA(1);
-			if ( !(((((_la - 106)) & ~0x3f) == 0 && ((1L << (_la - 106)) & ((1L << (EQ - 106)) | (1L << (NULLEQ - 106)) | (1L << (NEQ - 106)) | (1L << (LT - 106)) | (1L << (LTE - 106)) | (1L << (GT - 106)) | (1L << (GTE - 106)))) != 0)) ) {
+			if ( !(((((_la - 108)) & ~0x3f) == 0 && ((1L << (_la - 108)) & ((1L << (EQ - 108)) | (1L << (NULLEQ - 108)) | (1L << (NEQ - 108)) | (1L << (LT - 108)) | (1L << (LTE - 108)) | (1L << (GT - 108)) | (1L << (GTE - 108)))) != 0)) ) {
 			_errHandler.recoverInline(this);
 			} else {
 				consume();
@@ -5571,7 +5605,7 @@ public final BooleanValueContext booleanValue() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(721);
+			setState(736);
 			_la = _input.LA(1);
 			if ( !(_la==FALSE || _la==TRUE) ) {
 			_errHandler.recoverInline(this);
@@ -5639,13 +5673,13 @@ public final IntervalContext interval() throws RecognitionException {
 		try {
 			enterOuterAlt(_localctx, 1);
 			{
-			setState(723);
+			setState(738);
 			match(INTERVAL);
-			setState(725);
+			setState(740);
 			_la = _input.LA(1);
 			if (_la==PLUS || _la==MINUS) {
 				{
-				setState(724);
+				setState(739);
 				((IntervalContext)_localctx).sign = _input.LT(1);
 				_la = _input.LA(1);
 				if ( !(_la==PLUS || _la==MINUS) ) {
@@ -5656,35 +5690,35 @@ public final IntervalContext interval() throws RecognitionException {
 				}
 			}

-			setState(729);
+			setState(744);
 			switch (_input.LA(1)) {
 			case INTEGER_VALUE:
 			case DECIMAL_VALUE:
 				{
-				setState(727);
+				setState(742);
 				((IntervalContext)_localctx).valueNumeric = number();
 				}
 				break;
 			case PARAM:
 			case STRING:
 				{
-				setState(728);
+				setState(743);
 				((IntervalContext)_localctx).valuePattern = string();
} break; default: throw new NoViableAltException(this); } - setState(731); + setState(746); ((IntervalContext)_localctx).leading = intervalField(); - setState(734); + setState(749); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,96,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,100,_ctx) ) { case 1: { - setState(732); + setState(747); match(TO); - setState(733); + setState(748); ((IntervalContext)_localctx).trailing = intervalField(); } break; @@ -5741,9 +5775,9 @@ public final IntervalFieldContext intervalField() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(736); + setState(751); _la = _input.LA(1); - if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << DAY) | (1L << DAYS) | (1L << HOUR) | (1L << HOURS) | (1L << MINUTE) | (1L << MINUTES) | (1L << MONTH) | (1L << MONTHS))) != 0) || ((((_la - 78)) & ~0x3f) == 0 && ((1L << (_la - 78)) & ((1L << (SECOND - 78)) | (1L << (SECONDS - 78)) | (1L << (YEAR - 78)) | (1L << (YEARS - 78)))) != 0)) ) { + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << DAY) | (1L << DAYS) | (1L << HOUR) | (1L << HOURS) | (1L << MINUTE) | (1L << MINUTES) | (1L << MONTH) | (1L << MONTHS))) != 0) || ((((_la - 80)) & ~0x3f) == 0 && ((1L << (_la - 80)) & ((1L << (SECOND - 80)) | (1L << (SECONDS - 80)) | (1L << (YEAR - 80)) | (1L << (YEARS - 80)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); @@ -5799,7 +5833,7 @@ public final DataTypeContext dataType() throws RecognitionException { _localctx = new PrimitiveDataTypeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(738); + setState(753); identifier(); } } @@ -5851,25 +5885,25 @@ public final QualifiedNameContext qualifiedName() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(745); + setState(760); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,97,_ctx); + _alt = getInterpreter().adaptivePredict(_input,101,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(740); + setState(755); identifier(); - setState(741); + setState(756); match(DOT); } } } - setState(747); + setState(762); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,97,_ctx); + _alt = getInterpreter().adaptivePredict(_input,101,_ctx); } - setState(748); + setState(763); identifier(); } } @@ -5914,13 +5948,13 @@ public final IdentifierContext identifier() throws RecognitionException { IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); enterRule(_localctx, 94, RULE_identifier); try { - setState(752); + setState(767); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(750); + setState(765); quoteIdentifier(); } break; @@ -5967,7 +6001,7 @@ public final IdentifierContext identifier() throws RecognitionException { case DIGIT_IDENTIFIER: enterOuterAlt(_localctx, 2); { - setState(751); + setState(766); unquoteIdentifier(); } break; @@ -6020,43 +6054,43 @@ public final TableIdentifierContext tableIdentifier() throws RecognitionExceptio enterRule(_localctx, 96, RULE_tableIdentifier); int _la; try { - setState(766); + setState(781); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,101,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,105,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(757); + setState(772); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << 
ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (OPTIMIZED - 67)) | (1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OPTIMIZED - 69)) | (1L << (PARSED - 69)) | (1L << (PHYSICAL - 69)) | (1L << (PLAN - 69)) | (1L << (RLIKE - 69)) | (1L << (QUERY - 69)) | (1L << (SCHEMAS - 69)) | (1L << (SECOND - 69)) | (1L << (SHOW - 69)) | (1L << (SYS - 69)) | (1L << (TABLES - 69)) | (1L << (TEXT - 69)) | (1L << (TYPE - 69)) | (1L << (TYPES - 69)) | (1L << (VERIFY - 69)) | (1L << (YEAR - 69)) | (1L << (IDENTIFIER - 69)) | (1L << (DIGIT_IDENTIFIER - 69)) | (1L << (QUOTED_IDENTIFIER - 69)) | (1L << (BACKQUOTED_IDENTIFIER - 69)))) != 0)) { { - setState(754); + setState(769); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(755); + setState(770); match(T__3); } } - setState(759); + setState(774); match(TABLE_IDENTIFIER); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(763); + setState(778); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,100,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,104,_ctx) ) { case 1: { - setState(760); + setState(775); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(761); + setState(776); match(T__3); } break; } - setState(765); + setState(780); ((TableIdentifierContext)_localctx).name = identifier(); } break; @@ -6123,13 +6157,13 @@ public final QuoteIdentifierContext quoteIdentifier() throws RecognitionExceptio QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState()); enterRule(_localctx, 98, RULE_quoteIdentifier); try { - setState(770); + setState(785); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: _localctx = new QuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(768); + setState(783); match(QUOTED_IDENTIFIER); } break; @@ -6137,7 +6171,7 @@ public final QuoteIdentifierContext quoteIdentifier() throws RecognitionExceptio _localctx = new BackQuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(769); + setState(784); match(BACKQUOTED_IDENTIFIER); } break; @@ -6209,13 +6243,13 @@ public final UnquoteIdentifierContext 
unquoteIdentifier() throws RecognitionExce UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState()); enterRule(_localctx, 100, RULE_unquoteIdentifier); try { - setState(775); + setState(790); switch (_input.LA(1)) { case IDENTIFIER: _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(772); + setState(787); match(IDENTIFIER); } break; @@ -6261,7 +6295,7 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(773); + setState(788); nonReserved(); } break; @@ -6269,7 +6303,7 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce _localctx = new DigitIdentifierContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(774); + setState(789); match(DIGIT_IDENTIFIER); } break; @@ -6338,13 +6372,13 @@ public final NumberContext number() throws RecognitionException { NumberContext _localctx = new NumberContext(_ctx, getState()); enterRule(_localctx, 102, RULE_number); try { - setState(779); + setState(794); switch (_input.LA(1)) { case DECIMAL_VALUE: _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(777); + setState(792); match(DECIMAL_VALUE); } break; @@ -6352,7 +6386,7 @@ public final NumberContext number() throws RecognitionException { _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(778); + setState(793); match(INTEGER_VALUE); } break; @@ -6400,7 +6434,7 @@ public final StringContext string() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(781); + setState(796); _la = _input.LA(1); if ( !(_la==PARAM || _la==STRING) ) { _errHandler.recoverInline(this); @@ -6456,13 +6490,13 @@ public final WhenClauseContext whenClause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(783); + setState(798); match(WHEN); - setState(784); + setState(799); ((WhenClauseContext)_localctx).condition = expression(); - setState(785); + setState(800); match(THEN); - setState(786); + setState(801); ((WhenClauseContext)_localctx).result = expression(); } } @@ -6543,9 +6577,9 @@ public final NonReservedContext nonReserved() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(788); + setState(803); _la = _input.LA(1); - if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (OPTIMIZED - 67)) | (1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)))) != 0)) ) { + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << 
DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OPTIMIZED - 69)) | (1L << (PARSED - 69)) | (1L << (PHYSICAL - 69)) | (1L << (PLAN - 69)) | (1L << (RLIKE - 69)) | (1L << (QUERY - 69)) | (1L << (SCHEMAS - 69)) | (1L << (SECOND - 69)) | (1L << (SHOW - 69)) | (1L << (SYS - 69)) | (1L << (TABLES - 69)) | (1L << (TEXT - 69)) | (1L << (TYPE - 69)) | (1L << (TYPES - 69)) | (1L << (VERIFY - 69)) | (1L << (YEAR - 69)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); @@ -6603,7 +6637,7 @@ private boolean primaryExpression_sempred(PrimaryExpressionContext _localctx, in } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u0088\u0319\4\2\t"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u008a\u0328\4\2\t"+ "\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+ "\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -6613,312 +6647,318 @@ private boolean primaryExpression_sempred(PrimaryExpressionContext _localctx, in "\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\3\2\3\2\3\2\3\3\3\3\3\3\3\4\3"+ "\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u0080\n\4\f\4\16\4\u0083\13\4\3\4\5"+ "\4\u0086\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4\u008f\n\4\f\4\16\4\u0092"+ - "\13\4\3\4\5\4\u0095\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u009c\n\4\3\4\3\4\3\4"+ - "\3\4\3\4\5\4\u00a3\n\4\3\4\3\4\3\4\5\4\u00a8\n\4\3\4\3\4\3\4\5\4\u00ad"+ - "\n\4\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00b5\n\4\3\4\3\4\5\4\u00b9\n\4\3\4\3"+ - "\4\3\4\3\4\7\4\u00bf\n\4\f\4\16\4\u00c2\13\4\5\4\u00c4\n\4\3\4\3\4\3\4"+ - "\3\4\5\4\u00ca\n\4\3\4\3\4\3\4\5\4\u00cf\n\4\3\4\5\4\u00d2\n\4\3\4\3\4"+ - "\3\4\5\4\u00d7\n\4\3\4\5\4\u00da\n\4\5\4\u00dc\n\4\3\5\3\5\3\5\3\5\7\5"+ - "\u00e2\n\5\f\5\16\5\u00e5\13\5\5\5\u00e7\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3"+ - "\6\3\6\7\6\u00f1\n\6\f\6\16\6\u00f4\13\6\5\6\u00f6\n\6\3\6\5\6\u00f9\n"+ - "\6\3\7\3\7\3\7\3\7\3\7\5\7\u0100\n\7\3\b\3\b\3\b\3\b\3\b\5\b\u0107\n\b"+ - "\3\t\3\t\5\t\u010b\n\t\3\t\3\t\5\t\u010f\n\t\3\n\3\n\5\n\u0113\n\n\3\n"+ - "\3\n\3\n\7\n\u0118\n\n\f\n\16\n\u011b\13\n\3\n\5\n\u011e\n\n\3\n\3\n\5"+ - "\n\u0122\n\n\3\n\3\n\3\n\5\n\u0127\n\n\3\n\3\n\5\n\u012b\n\n\3\13\3\13"+ - "\3\13\3\13\7\13\u0131\n\13\f\13\16\13\u0134\13\13\3\f\5\f\u0137\n\f\3"+ - "\f\3\f\3\f\7\f\u013c\n\f\f\f\16\f\u013f\13\f\3\r\3\r\3\16\3\16\3\16\3"+ - "\16\7\16\u0147\n\16\f\16\16\16\u014a\13\16\5\16\u014c\n\16\3\16\3\16\5"+ - "\16\u0150\n\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21\5\21"+ - "\u015c\n\21\3\21\5\21\u015f\n\21\3\22\3\22\7\22\u0163\n\22\f\22\16\22"+ - "\u0166\13\22\3\23\3\23\3\23\3\23\5\23\u016c\n\23\3\23\3\23\3\23\3\23\3"+ - "\23\5\23\u0173\n\23\3\24\5\24\u0176\n\24\3\24\3\24\5\24\u017a\n\24\3\24"+ - "\3\24\5\24\u017e\n\24\3\24\3\24\5\24\u0182\n\24\5\24\u0184\n\24\3\25\3"+ - "\25\3\25\3\25\3\25\3\25\3\25\7\25\u018d\n\25\f\25\16\25\u0190\13\25\3"+ - "\25\3\25\5\25\u0194\n\25\3\26\3\26\5\26\u0198\n\26\3\26\5\26\u019b\n\26"+ - "\3\26\3\26\3\26\3\26\5\26\u01a1\n\26\3\26\5\26\u01a4\n\26\3\26\3\26\3"+ - "\26\3\26\5\26\u01aa\n\26\3\26\5\26\u01ad\n\26\5\26\u01af\n\26\3\27\3\27"+ - "\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ - 
"\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ - "\3\30\3\30\3\30\5\30\u01d2\n\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u01da"+ - "\n\30\f\30\16\30\u01dd\13\30\3\31\3\31\7\31\u01e1\n\31\f\31\16\31\u01e4"+ - "\13\31\3\32\3\32\5\32\u01e8\n\32\3\33\5\33\u01eb\n\33\3\33\3\33\3\33\3"+ - "\33\3\33\3\33\5\33\u01f3\n\33\3\33\3\33\3\33\3\33\3\33\7\33\u01fa\n\33"+ - "\f\33\16\33\u01fd\13\33\3\33\3\33\3\33\5\33\u0202\n\33\3\33\3\33\3\33"+ - "\3\33\3\33\3\33\5\33\u020a\n\33\3\33\3\33\3\33\5\33\u020f\n\33\3\33\3"+ - "\33\3\33\3\33\5\33\u0215\n\33\3\33\5\33\u0218\n\33\3\34\3\34\3\34\3\35"+ - "\3\35\5\35\u021f\n\35\3\36\3\36\3\36\3\36\3\36\3\36\5\36\u0227\n\36\3"+ - "\37\3\37\3\37\3\37\5\37\u022d\n\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37"+ - "\3\37\3\37\3\37\7\37\u0239\n\37\f\37\16\37\u023c\13\37\3 \3 \3 \3 \3 "+ - "\3 \3 \3 \5 \u0246\n \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \5 \u0255"+ - "\n \3 \6 \u0258\n \r \16 \u0259\3 \3 \5 \u025e\n \3 \3 \5 \u0262\n \3"+ - " \3 \3 \7 \u0267\n \f \16 \u026a\13 \3!\3!\3!\5!\u026f\n!\3\"\3\"\3\""+ - "\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u027b\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$"+ - "\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\5%\u0290\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3"+ - "\'\3\'\3\'\3\'\5\'\u029e\n\'\3(\3(\3(\5(\u02a3\n(\3(\3(\3(\7(\u02a8\n"+ - "(\f(\16(\u02ab\13(\5(\u02ad\n(\3(\3(\3)\3)\3)\5)\u02b4\n)\3*\3*\3*\3*"+ - "\3*\6*\u02bb\n*\r*\16*\u02bc\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3"+ - "*\3*\3*\3*\5*\u02d0\n*\3+\3+\3,\3,\3-\3-\5-\u02d8\n-\3-\3-\5-\u02dc\n"+ - "-\3-\3-\3-\5-\u02e1\n-\3.\3.\3/\3/\3\60\3\60\3\60\7\60\u02ea\n\60\f\60"+ - "\16\60\u02ed\13\60\3\60\3\60\3\61\3\61\5\61\u02f3\n\61\3\62\3\62\3\62"+ - "\5\62\u02f8\n\62\3\62\3\62\3\62\3\62\5\62\u02fe\n\62\3\62\5\62\u0301\n"+ - "\62\3\63\3\63\5\63\u0305\n\63\3\64\3\64\3\64\5\64\u030a\n\64\3\65\3\65"+ - "\5\65\u030e\n\65\3\66\3\66\3\67\3\67\3\67\3\67\3\67\38\38\38\2\5.<>9\2"+ + "\13\4\3\4\5\4\u0095\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u009c\n\4\3\4\3\4\5\4"+ + "\u00a0\n\4\3\4\3\4\3\4\3\4\5\4\u00a6\n\4\3\4\3\4\3\4\5\4\u00ab\n\4\3\4"+ + "\3\4\3\4\5\4\u00b0\n\4\3\4\3\4\5\4\u00b4\n\4\3\4\3\4\3\4\5\4\u00b9\n\4"+ + "\3\4\3\4\3\4\3\4\3\4\3\4\5\4\u00c1\n\4\3\4\3\4\5\4\u00c5\n\4\3\4\3\4\3"+ + "\4\3\4\7\4\u00cb\n\4\f\4\16\4\u00ce\13\4\5\4\u00d0\n\4\3\4\3\4\3\4\3\4"+ + "\5\4\u00d6\n\4\3\4\3\4\3\4\5\4\u00db\n\4\3\4\5\4\u00de\n\4\3\4\3\4\3\4"+ + "\5\4\u00e3\n\4\3\4\5\4\u00e6\n\4\5\4\u00e8\n\4\3\5\3\5\3\5\3\5\7\5\u00ee"+ + "\n\5\f\5\16\5\u00f1\13\5\5\5\u00f3\n\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6"+ + "\7\6\u00fd\n\6\f\6\16\6\u0100\13\6\5\6\u0102\n\6\3\6\5\6\u0105\n\6\3\7"+ + "\3\7\3\7\3\7\3\7\5\7\u010c\n\7\3\b\3\b\3\b\3\b\3\b\5\b\u0113\n\b\3\t\3"+ + "\t\5\t\u0117\n\t\3\t\3\t\5\t\u011b\n\t\3\n\3\n\5\n\u011f\n\n\3\n\3\n\3"+ + "\n\7\n\u0124\n\n\f\n\16\n\u0127\13\n\3\n\5\n\u012a\n\n\3\n\3\n\5\n\u012e"+ + "\n\n\3\n\3\n\3\n\5\n\u0133\n\n\3\n\3\n\5\n\u0137\n\n\3\13\3\13\3\13\3"+ + "\13\7\13\u013d\n\13\f\13\16\13\u0140\13\13\3\f\5\f\u0143\n\f\3\f\3\f\3"+ + "\f\7\f\u0148\n\f\f\f\16\f\u014b\13\f\3\r\3\r\3\16\3\16\3\16\3\16\7\16"+ + "\u0153\n\16\f\16\16\16\u0156\13\16\5\16\u0158\n\16\3\16\3\16\5\16\u015c"+ + "\n\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21\5\21\u0168\n\21"+ + "\3\21\5\21\u016b\n\21\3\22\3\22\7\22\u016f\n\22\f\22\16\22\u0172\13\22"+ + "\3\23\3\23\3\23\3\23\5\23\u0178\n\23\3\23\3\23\3\23\3\23\3\23\5\23\u017f"+ + "\n\23\3\24\5\24\u0182\n\24\3\24\3\24\5\24\u0186\n\24\3\24\3\24\5\24\u018a"+ + "\n\24\3\24\3\24\5\24\u018e\n\24\5\24\u0190\n\24\3\25\3\25\3\25\3\25\3"+ + 
"\25\3\25\3\25\7\25\u0199\n\25\f\25\16\25\u019c\13\25\3\25\3\25\5\25\u01a0"+ + "\n\25\3\26\5\26\u01a3\n\26\3\26\3\26\5\26\u01a7\n\26\3\26\5\26\u01aa\n"+ + "\26\3\26\3\26\3\26\3\26\5\26\u01b0\n\26\3\26\5\26\u01b3\n\26\3\26\3\26"+ + "\3\26\3\26\5\26\u01b9\n\26\3\26\5\26\u01bc\n\26\5\26\u01be\n\26\3\27\3"+ + "\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3"+ + "\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3"+ + "\30\3\30\3\30\3\30\5\30\u01e1\n\30\3\30\3\30\3\30\3\30\3\30\3\30\7\30"+ + "\u01e9\n\30\f\30\16\30\u01ec\13\30\3\31\3\31\7\31\u01f0\n\31\f\31\16\31"+ + "\u01f3\13\31\3\32\3\32\5\32\u01f7\n\32\3\33\5\33\u01fa\n\33\3\33\3\33"+ + "\3\33\3\33\3\33\3\33\5\33\u0202\n\33\3\33\3\33\3\33\3\33\3\33\7\33\u0209"+ + "\n\33\f\33\16\33\u020c\13\33\3\33\3\33\3\33\5\33\u0211\n\33\3\33\3\33"+ + "\3\33\3\33\3\33\3\33\5\33\u0219\n\33\3\33\3\33\3\33\5\33\u021e\n\33\3"+ + "\33\3\33\3\33\3\33\5\33\u0224\n\33\3\33\5\33\u0227\n\33\3\34\3\34\3\34"+ + "\3\35\3\35\5\35\u022e\n\35\3\36\3\36\3\36\3\36\3\36\3\36\5\36\u0236\n"+ + "\36\3\37\3\37\3\37\3\37\5\37\u023c\n\37\3\37\3\37\3\37\3\37\3\37\3\37"+ + "\3\37\3\37\3\37\3\37\7\37\u0248\n\37\f\37\16\37\u024b\13\37\3 \3 \3 \3"+ + " \3 \3 \3 \3 \5 \u0255\n \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \5 \u0264"+ + "\n \3 \6 \u0267\n \r \16 \u0268\3 \3 \5 \u026d\n \3 \3 \5 \u0271\n \3"+ + " \3 \3 \7 \u0276\n \f \16 \u0279\13 \3!\3!\3!\5!\u027e\n!\3\"\3\"\3\""+ + "\3\"\3\"\3\"\3\"\3\"\3\"\3\"\5\"\u028a\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$"+ + "\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\5%\u029f\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3"+ + "\'\3\'\3\'\3\'\5\'\u02ad\n\'\3(\3(\3(\5(\u02b2\n(\3(\3(\3(\7(\u02b7\n"+ + "(\f(\16(\u02ba\13(\5(\u02bc\n(\3(\3(\3)\3)\3)\5)\u02c3\n)\3*\3*\3*\3*"+ + "\3*\6*\u02ca\n*\r*\16*\u02cb\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3"+ + "*\3*\3*\3*\5*\u02df\n*\3+\3+\3,\3,\3-\3-\5-\u02e7\n-\3-\3-\5-\u02eb\n"+ + "-\3-\3-\3-\5-\u02f0\n-\3.\3.\3/\3/\3\60\3\60\3\60\7\60\u02f9\n\60\f\60"+ + "\16\60\u02fc\13\60\3\60\3\60\3\61\3\61\5\61\u0302\n\61\3\62\3\62\3\62"+ + "\5\62\u0307\n\62\3\62\3\62\3\62\3\62\5\62\u030d\n\62\3\62\5\62\u0310\n"+ + "\62\3\63\3\63\5\63\u0314\n\63\3\64\3\64\3\64\5\64\u0319\n\64\3\65\3\65"+ + "\5\65\u031d\n\65\3\66\3\66\3\67\3\67\3\67\3\67\3\67\38\38\38\2\5.<>9\2"+ "\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJL"+ - "NPRTVXZ\\^`bdfhjln\2\22\b\2\7\7\t\t\"\"::EEII\4\2,,WW\4\2\t\tEE\4\2))"+ - "\61\61\3\2\34\35\3\2st\4\2\7\7}}\4\2\r\r\34\34\4\2\'\'\66\66\4\2\7\7\36"+ - "\36\3\2uw\3\2lr\4\2&&YY\7\2\31\32/\60>EEIKMPSTVW[\\^^bb\u0378\2p\3\2\2"+ - "\2\4s\3\2\2\2\6\u00db\3\2\2\2\b\u00e6\3\2\2\2\n\u00ea\3\2\2\2\f\u00ff"+ - "\3\2\2\2\16\u0106\3\2\2\2\20\u0108\3\2\2\2\22\u0110\3\2\2\2\24\u012c\3"+ - "\2\2\2\26\u0136\3\2\2\2\30\u0140\3\2\2\2\32\u014f\3\2\2\2\34\u0151\3\2"+ - "\2\2\36\u0157\3\2\2\2 \u0159\3\2\2\2\"\u0160\3\2\2\2$\u0172\3\2\2\2&\u0183"+ - "\3\2\2\2(\u0193\3\2\2\2*\u01ae\3\2\2\2,\u01b0\3\2\2\2.\u01d1\3\2\2\2\60"+ - "\u01e2\3\2\2\2\62\u01e5\3\2\2\2\64\u0217\3\2\2\2\66\u0219\3\2\2\28\u021c"+ - "\3\2\2\2:\u0226\3\2\2\2<\u022c\3\2\2\2>\u0261\3\2\2\2@\u026e\3\2\2\2B"+ - "\u027a\3\2\2\2D\u027c\3\2\2\2F\u0283\3\2\2\2H\u028f\3\2\2\2J\u0291\3\2"+ - "\2\2L\u029d\3\2\2\2N\u029f\3\2\2\2P\u02b3\3\2\2\2R\u02cf\3\2\2\2T\u02d1"+ - "\3\2\2\2V\u02d3\3\2\2\2X\u02d5\3\2\2\2Z\u02e2\3\2\2\2\\\u02e4\3\2\2\2"+ - "^\u02eb\3\2\2\2`\u02f2\3\2\2\2b\u0300\3\2\2\2d\u0304\3\2\2\2f\u0309\3"+ - "\2\2\2h\u030d\3\2\2\2j\u030f\3\2\2\2l\u0311\3\2\2\2n\u0316\3\2\2\2pq\5"+ - 
"\6\4\2qr\7\2\2\3r\3\3\2\2\2st\5,\27\2tu\7\2\2\3u\5\3\2\2\2v\u00dc\5\b"+ - "\5\2w\u0085\7$\2\2x\u0081\7\3\2\2yz\7K\2\2z\u0080\t\2\2\2{|\7(\2\2|\u0080"+ - "\t\3\2\2}~\7^\2\2~\u0080\5V,\2\177y\3\2\2\2\177{\3\2\2\2\177}\3\2\2\2"+ + "NPRTVXZ\\^`bdfhjln\2\22\b\2\7\7\t\t\"\"<ARSde\3\2}~\30\2\b\t\23\24"+ + "\26\31\33\33\"\"$$\'(+-\60\60\65\6588;<>>@@GGKMORUVXY]^``dd\u038b\2p\3"+ + "\2\2\2\4s\3\2\2\2\6\u00e7\3\2\2\2\b\u00f2\3\2\2\2\n\u00f6\3\2\2\2\f\u010b"+ + "\3\2\2\2\16\u0112\3\2\2\2\20\u0114\3\2\2\2\22\u011c\3\2\2\2\24\u0138\3"+ + "\2\2\2\26\u0142\3\2\2\2\30\u014c\3\2\2\2\32\u015b\3\2\2\2\34\u015d\3\2"+ + "\2\2\36\u0163\3\2\2\2 \u0165\3\2\2\2\"\u016c\3\2\2\2$\u017e\3\2\2\2&\u018f"+ + "\3\2\2\2(\u019f\3\2\2\2*\u01bd\3\2\2\2,\u01bf\3\2\2\2.\u01e0\3\2\2\2\60"+ + "\u01f1\3\2\2\2\62\u01f4\3\2\2\2\64\u0226\3\2\2\2\66\u0228\3\2\2\28\u022b"+ + "\3\2\2\2:\u0235\3\2\2\2<\u023b\3\2\2\2>\u0270\3\2\2\2@\u027d\3\2\2\2B"+ + "\u0289\3\2\2\2D\u028b\3\2\2\2F\u0292\3\2\2\2H\u029e\3\2\2\2J\u02a0\3\2"+ + "\2\2L\u02ac\3\2\2\2N\u02ae\3\2\2\2P\u02c2\3\2\2\2R\u02de\3\2\2\2T\u02e0"+ + "\3\2\2\2V\u02e2\3\2\2\2X\u02e4\3\2\2\2Z\u02f1\3\2\2\2\\\u02f3\3\2\2\2"+ + "^\u02fa\3\2\2\2`\u0301\3\2\2\2b\u030f\3\2\2\2d\u0313\3\2\2\2f\u0318\3"+ + "\2\2\2h\u031c\3\2\2\2j\u031e\3\2\2\2l\u0320\3\2\2\2n\u0325\3\2\2\2pq\5"+ + "\6\4\2qr\7\2\2\3r\3\3\2\2\2st\5,\27\2tu\7\2\2\3u\5\3\2\2\2v\u00e8\5\b"+ + "\5\2w\u0085\7$\2\2x\u0081\7\3\2\2yz\7M\2\2z\u0080\t\2\2\2{|\7(\2\2|\u0080"+ + "\t\3\2\2}~\7`\2\2~\u0080\5V,\2\177y\3\2\2\2\177{\3\2\2\2\177}\3\2\2\2"+ "\u0080\u0083\3\2\2\2\u0081\177\3\2\2\2\u0081\u0082\3\2\2\2\u0082\u0084"+ "\3\2\2\2\u0083\u0081\3\2\2\2\u0084\u0086\7\4\2\2\u0085x\3\2\2\2\u0085"+ - "\u0086\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u00dc\5\6\4\2\u0088\u0094\7\33"+ - "\2\2\u0089\u0090\7\3\2\2\u008a\u008b\7K\2\2\u008b\u008f\t\4\2\2\u008c"+ + "\u0086\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u00e8\5\6\4\2\u0088\u0094\7\33"+ + "\2\2\u0089\u0090\7\3\2\2\u008a\u008b\7M\2\2\u008b\u008f\t\4\2\2\u008c"+ "\u008d\7(\2\2\u008d\u008f\t\3\2\2\u008e\u008a\3\2\2\2\u008e\u008c\3\2"+ "\2\2\u008f\u0092\3\2\2\2\u0090\u008e\3\2\2\2\u0090\u0091\3\2\2\2\u0091"+ "\u0093\3\2\2\2\u0092\u0090\3\2\2\2\u0093\u0095\7\4\2\2\u0094\u0089\3\2"+ - "\2\2\u0094\u0095\3\2\2\2\u0095\u0096\3\2\2\2\u0096\u00dc\5\6\4\2\u0097"+ - "\u0098\7S\2\2\u0098\u009b\7V\2\2\u0099\u009c\5\66\34\2\u009a\u009c\5b"+ - "\62\2\u009b\u0099\3\2\2\2\u009b\u009a\3\2\2\2\u009b\u009c\3\2\2\2\u009c"+ - "\u00dc\3\2\2\2\u009d\u009e\7S\2\2\u009e\u009f\7\24\2\2\u009f\u00a2\t\5"+ - "\2\2\u00a0\u00a3\5\66\34\2\u00a1\u00a3\5b\62\2\u00a2\u00a0\3\2\2\2\u00a2"+ - "\u00a1\3\2\2\2\u00a3\u00dc\3\2\2\2\u00a4\u00a7\t\6\2\2\u00a5\u00a8\5\66"+ - "\34\2\u00a6\u00a8\5b\62\2\u00a7\u00a5\3\2\2\2\u00a7\u00a6\3\2\2\2\u00a8"+ - "\u00dc\3\2\2\2\u00a9\u00aa\7S\2\2\u00aa\u00ac\7+\2\2\u00ab\u00ad\5\66"+ - "\34\2\u00ac\u00ab\3\2\2\2\u00ac\u00ad\3\2\2\2\u00ad\u00dc\3\2\2\2\u00ae"+ - "\u00af\7S\2\2\u00af\u00dc\7O\2\2\u00b0\u00b1\7T\2\2\u00b1\u00b4\7V\2\2"+ - "\u00b2\u00b3\7\22\2\2\u00b3\u00b5\5\66\34\2\u00b4\u00b2\3\2\2\2\u00b4"+ - "\u00b5\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6\u00b9\5\66\34\2\u00b7\u00b9\5"+ - "b\62\2\u00b8\u00b6\3\2\2\2\u00b8\u00b7\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9"+ - "\u00c3\3\2\2\2\u00ba\u00bb\7[\2\2\u00bb\u00c0\5j\66\2\u00bc\u00bd\7\5"+ - "\2\2\u00bd\u00bf\5j\66\2\u00be\u00bc\3\2\2\2\u00bf\u00c2\3\2\2\2\u00c0"+ - "\u00be\3\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00c4\3\2\2\2\u00c2\u00c0\3\2"+ - "\2\2\u00c3\u00ba\3\2\2\2\u00c3\u00c4\3\2\2\2\u00c4\u00dc\3\2\2\2\u00c5"+ - 
"\u00c6\7T\2\2\u00c6\u00c9\7\24\2\2\u00c7\u00c8\7\22\2\2\u00c8\u00ca\5"+ - "j\66\2\u00c9\u00c7\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00ce\3\2\2\2\u00cb"+ - "\u00cc\7U\2\2\u00cc\u00cf\5\66\34\2\u00cd\u00cf\5b\62\2\u00ce\u00cb\3"+ - "\2\2\2\u00ce\u00cd\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d1\3\2\2\2\u00d0"+ - "\u00d2\5\66\34\2\u00d1\u00d0\3\2\2\2\u00d1\u00d2\3\2\2\2\u00d2\u00dc\3"+ - "\2\2\2\u00d3\u00d4\7T\2\2\u00d4\u00d9\7\\\2\2\u00d5\u00d7\t\7\2\2\u00d6"+ - "\u00d5\3\2\2\2\u00d6\u00d7\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8\u00da\5h"+ - "\65\2\u00d9\u00d6\3\2\2\2\u00d9\u00da\3\2\2\2\u00da\u00dc\3\2\2\2\u00db"+ - "v\3\2\2\2\u00dbw\3\2\2\2\u00db\u0088\3\2\2\2\u00db\u0097\3\2\2\2\u00db"+ - "\u009d\3\2\2\2\u00db\u00a4\3\2\2\2\u00db\u00a9\3\2\2\2\u00db\u00ae\3\2"+ - "\2\2\u00db\u00b0\3\2\2\2\u00db\u00c5\3\2\2\2\u00db\u00d3\3\2\2\2\u00dc"+ - "\7\3\2\2\2\u00dd\u00de\7a\2\2\u00de\u00e3\5\34\17\2\u00df\u00e0\7\5\2"+ - "\2\u00e0\u00e2\5\34\17\2\u00e1\u00df\3\2\2\2\u00e2\u00e5\3\2\2\2\u00e3"+ - "\u00e1\3\2\2\2\u00e3\u00e4\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3\2"+ - "\2\2\u00e6\u00dd\3\2\2\2\u00e6\u00e7\3\2\2\2\u00e7\u00e8\3\2\2\2\u00e8"+ - "\u00e9\5\n\6\2\u00e9\t\3\2\2\2\u00ea\u00f5\5\16\b\2\u00eb\u00ec\7G\2\2"+ - "\u00ec\u00ed\7\17\2\2\u00ed\u00f2\5\20\t\2\u00ee\u00ef\7\5\2\2\u00ef\u00f1"+ - "\5\20\t\2\u00f0\u00ee\3\2\2\2\u00f1\u00f4\3\2\2\2\u00f2\u00f0\3\2\2\2"+ - "\u00f2\u00f3\3\2\2\2\u00f3\u00f6\3\2\2\2\u00f4\u00f2\3\2\2\2\u00f5\u00eb"+ - "\3\2\2\2\u00f5\u00f6\3\2\2\2\u00f6\u00f8\3\2\2\2\u00f7\u00f9\5\f\7\2\u00f8"+ - "\u00f7\3\2\2\2\u00f8\u00f9\3\2\2\2\u00f9\13\3\2\2\2\u00fa\u00fb\79\2\2"+ - "\u00fb\u0100\t\b\2\2\u00fc\u00fd\7f\2\2\u00fd\u00fe\t\b\2\2\u00fe\u0100"+ - "\7k\2\2\u00ff\u00fa\3\2\2\2\u00ff\u00fc\3\2\2\2\u0100\r\3\2\2\2\u0101"+ - "\u0107\5\22\n\2\u0102\u0103\7\3\2\2\u0103\u0104\5\n\6\2\u0104\u0105\7"+ - "\4\2\2\u0105\u0107\3\2\2\2\u0106\u0101\3\2\2\2\u0106\u0102\3\2\2\2\u0107"+ - "\17\3\2\2\2\u0108\u010a\5,\27\2\u0109\u010b\t\t\2\2\u010a\u0109\3\2\2"+ - "\2\u010a\u010b\3\2\2\2\u010b\u010e\3\2\2\2\u010c\u010d\7C\2\2\u010d\u010f"+ - "\t\n\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3\2\2\2\u010f\21\3\2\2\2\u0110"+ - "\u0112\7R\2\2\u0111\u0113\5\36\20\2\u0112\u0111\3\2\2\2\u0112\u0113\3"+ - "\2\2\2\u0113\u0114\3\2\2\2\u0114\u0119\5 \21\2\u0115\u0116\7\5\2\2\u0116"+ - "\u0118\5 \21\2\u0117\u0115\3\2\2\2\u0118\u011b\3\2\2\2\u0119\u0117\3\2"+ - "\2\2\u0119\u011a\3\2\2\2\u011a\u011d\3\2\2\2\u011b\u0119\3\2\2\2\u011c"+ - "\u011e\5\24\13\2\u011d\u011c\3\2\2\2\u011d\u011e\3\2\2\2\u011e\u0121\3"+ - "\2\2\2\u011f\u0120\7`\2\2\u0120\u0122\5.\30\2\u0121\u011f\3\2\2\2\u0121"+ - "\u0122\3\2\2\2\u0122\u0126\3\2\2\2\u0123\u0124\7-\2\2\u0124\u0125\7\17"+ - "\2\2\u0125\u0127\5\26\f\2\u0126\u0123\3\2\2\2\u0126\u0127\3\2\2\2\u0127"+ - "\u012a\3\2\2\2\u0128\u0129\7.\2\2\u0129\u012b\5.\30\2\u012a\u0128\3\2"+ - "\2\2\u012a\u012b\3\2\2\2\u012b\23\3\2\2\2\u012c\u012d\7)\2\2\u012d\u0132"+ - "\5\"\22\2\u012e\u012f\7\5\2\2\u012f\u0131\5\"\22\2\u0130\u012e\3\2\2\2"+ - "\u0131\u0134\3\2\2\2\u0132\u0130\3\2\2\2\u0132\u0133\3\2\2\2\u0133\25"+ - "\3\2\2\2\u0134\u0132\3\2\2\2\u0135\u0137\5\36\20\2\u0136\u0135\3\2\2\2"+ - "\u0136\u0137\3\2\2\2\u0137\u0138\3\2\2\2\u0138\u013d\5\30\r\2\u0139\u013a"+ - "\7\5\2\2\u013a\u013c\5\30\r\2\u013b\u0139\3\2\2\2\u013c\u013f\3\2\2\2"+ - "\u013d\u013b\3\2\2\2\u013d\u013e\3\2\2\2\u013e\27\3\2\2\2\u013f\u013d"+ - "\3\2\2\2\u0140\u0141\5\32\16\2\u0141\31\3\2\2\2\u0142\u014b\7\3\2\2\u0143"+ - "\u0148\5,\27\2\u0144\u0145\7\5\2\2\u0145\u0147\5,\27\2\u0146\u0144\3\2"+ - 
"\2\2\u0147\u014a\3\2\2\2\u0148\u0146\3\2\2\2\u0148\u0149\3\2\2\2\u0149"+ - "\u014c\3\2\2\2\u014a\u0148\3\2\2\2\u014b\u0143\3\2\2\2\u014b\u014c\3\2"+ - "\2\2\u014c\u014d\3\2\2\2\u014d\u0150\7\4\2\2\u014e\u0150\5,\27\2\u014f"+ - "\u0142\3\2\2\2\u014f\u014e\3\2\2\2\u0150\33\3\2\2\2\u0151\u0152\5`\61"+ - "\2\u0152\u0153\7\f\2\2\u0153\u0154\7\3\2\2\u0154\u0155\5\n\6\2\u0155\u0156"+ - "\7\4\2\2\u0156\35\3\2\2\2\u0157\u0158\t\13\2\2\u0158\37\3\2\2\2\u0159"+ - "\u015e\5,\27\2\u015a\u015c\7\f\2\2\u015b\u015a\3\2\2\2\u015b\u015c\3\2"+ - "\2\2\u015c\u015d\3\2\2\2\u015d\u015f\5`\61\2\u015e\u015b\3\2\2\2\u015e"+ - "\u015f\3\2\2\2\u015f!\3\2\2\2\u0160\u0164\5*\26\2\u0161\u0163\5$\23\2"+ - "\u0162\u0161\3\2\2\2\u0163\u0166\3\2\2\2\u0164\u0162\3\2\2\2\u0164\u0165"+ - "\3\2\2\2\u0165#\3\2\2\2\u0166\u0164\3\2\2\2\u0167\u0168\5&\24\2\u0168"+ - "\u0169\7\65\2\2\u0169\u016b\5*\26\2\u016a\u016c\5(\25\2\u016b\u016a\3"+ - "\2\2\2\u016b\u016c\3\2\2\2\u016c\u0173\3\2\2\2\u016d\u016e\7@\2\2\u016e"+ - "\u016f\5&\24\2\u016f\u0170\7\65\2\2\u0170\u0171\5*\26\2\u0171\u0173\3"+ - "\2\2\2\u0172\u0167\3\2\2\2\u0172\u016d\3\2\2\2\u0173%\3\2\2\2\u0174\u0176"+ - "\7\62\2\2\u0175\u0174\3\2\2\2\u0175\u0176\3\2\2\2\u0176\u0184\3\2\2\2"+ - "\u0177\u0179\7\67\2\2\u0178\u017a\7H\2\2\u0179\u0178\3\2\2\2\u0179\u017a"+ - "\3\2\2\2\u017a\u0184\3\2\2\2\u017b\u017d\7L\2\2\u017c\u017e\7H\2\2\u017d"+ - "\u017c\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0184\3\2\2\2\u017f\u0181\7*"+ - "\2\2\u0180\u0182\7H\2\2\u0181\u0180\3\2\2\2\u0181\u0182\3\2\2\2\u0182"+ - "\u0184\3\2\2\2\u0183\u0175\3\2\2\2\u0183\u0177\3\2\2\2\u0183\u017b\3\2"+ - "\2\2\u0183\u017f\3\2\2\2\u0184\'\3\2\2\2\u0185\u0186\7D\2\2\u0186\u0194"+ - "\5.\30\2\u0187\u0188\7]\2\2\u0188\u0189\7\3\2\2\u0189\u018e\5`\61\2\u018a"+ - "\u018b\7\5\2\2\u018b\u018d\5`\61\2\u018c\u018a\3\2\2\2\u018d\u0190\3\2"+ - "\2\2\u018e\u018c\3\2\2\2\u018e\u018f\3\2\2\2\u018f\u0191\3\2\2\2\u0190"+ - "\u018e\3\2\2\2\u0191\u0192\7\4\2\2\u0192\u0194\3\2\2\2\u0193\u0185\3\2"+ - "\2\2\u0193\u0187\3\2\2\2\u0194)\3\2\2\2\u0195\u019a\5b\62\2\u0196\u0198"+ - "\7\f\2\2\u0197\u0196\3\2\2\2\u0197\u0198\3\2\2\2\u0198\u0199\3\2\2\2\u0199"+ - "\u019b\5^\60\2\u019a\u0197\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u01af\3\2"+ - "\2\2\u019c\u019d\7\3\2\2\u019d\u019e\5\n\6\2\u019e\u01a3\7\4\2\2\u019f"+ - "\u01a1\7\f\2\2\u01a0\u019f\3\2\2\2\u01a0\u01a1\3\2\2\2\u01a1\u01a2\3\2"+ - "\2\2\u01a2\u01a4\5^\60\2\u01a3\u01a0\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4"+ - "\u01af\3\2\2\2\u01a5\u01a6\7\3\2\2\u01a6\u01a7\5\"\22\2\u01a7\u01ac\7"+ - "\4\2\2\u01a8\u01aa\7\f\2\2\u01a9\u01a8\3\2\2\2\u01a9\u01aa\3\2\2\2\u01aa"+ - "\u01ab\3\2\2\2\u01ab\u01ad\5^\60\2\u01ac\u01a9\3\2\2\2\u01ac\u01ad\3\2"+ - "\2\2\u01ad\u01af\3\2\2\2\u01ae\u0195\3\2\2\2\u01ae\u019c\3\2\2\2\u01ae"+ - "\u01a5\3\2\2\2\u01af+\3\2\2\2\u01b0\u01b1\5.\30\2\u01b1-\3\2\2\2\u01b2"+ - "\u01b3\b\30\1\2\u01b3\u01b4\7A\2\2\u01b4\u01d2\5.\30\n\u01b5\u01b6\7#"+ - "\2\2\u01b6\u01b7\7\3\2\2\u01b7\u01b8\5\b\5\2\u01b8\u01b9\7\4\2\2\u01b9"+ - "\u01d2\3\2\2\2\u01ba\u01bb\7N\2\2\u01bb\u01bc\7\3\2\2\u01bc\u01bd\5j\66"+ - "\2\u01bd\u01be\5\60\31\2\u01be\u01bf\7\4\2\2\u01bf\u01d2\3\2\2\2\u01c0"+ - "\u01c1\7;\2\2\u01c1\u01c2\7\3\2\2\u01c2\u01c3\5^\60\2\u01c3\u01c4\7\5"+ - "\2\2\u01c4\u01c5\5j\66\2\u01c5\u01c6\5\60\31\2\u01c6\u01c7\7\4\2\2\u01c7"+ - "\u01d2\3\2\2\2\u01c8\u01c9\7;\2\2\u01c9\u01ca\7\3\2\2\u01ca\u01cb\5j\66"+ - "\2\u01cb\u01cc\7\5\2\2\u01cc\u01cd\5j\66\2\u01cd\u01ce\5\60\31\2\u01ce"+ - "\u01cf\7\4\2\2\u01cf\u01d2\3\2\2\2\u01d0\u01d2\5\62\32\2\u01d1\u01b2\3"+ - 
"\2\2\2\u01d1\u01b5\3\2\2\2\u01d1\u01ba\3\2\2\2\u01d1\u01c0\3\2\2\2\u01d1"+ - "\u01c8\3\2\2\2\u01d1\u01d0\3\2\2\2\u01d2\u01db\3\2\2\2\u01d3\u01d4\f\4"+ - "\2\2\u01d4\u01d5\7\n\2\2\u01d5\u01da\5.\30\5\u01d6\u01d7\f\3\2\2\u01d7"+ - "\u01d8\7F\2\2\u01d8\u01da\5.\30\4\u01d9\u01d3\3\2\2\2\u01d9\u01d6\3\2"+ - "\2\2\u01da\u01dd\3\2\2\2\u01db\u01d9\3\2\2\2\u01db\u01dc\3\2\2\2\u01dc"+ - "/\3\2\2\2\u01dd\u01db\3\2\2\2\u01de\u01df\7\5\2\2\u01df\u01e1\5j\66\2"+ - "\u01e0\u01de\3\2\2\2\u01e1\u01e4\3\2\2\2\u01e2\u01e0\3\2\2\2\u01e2\u01e3"+ - "\3\2\2\2\u01e3\61\3\2\2\2\u01e4\u01e2\3\2\2\2\u01e5\u01e7\5<\37\2\u01e6"+ - "\u01e8\5\64\33\2\u01e7\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\63\3\2"+ - "\2\2\u01e9\u01eb\7A\2\2\u01ea\u01e9\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb"+ - "\u01ec\3\2\2\2\u01ec\u01ed\7\16\2\2\u01ed\u01ee\5<\37\2\u01ee\u01ef\7"+ - "\n\2\2\u01ef\u01f0\5<\37\2\u01f0\u0218\3\2\2\2\u01f1\u01f3\7A\2\2\u01f2"+ - "\u01f1\3\2\2\2\u01f2\u01f3\3\2\2\2\u01f3\u01f4\3\2\2\2\u01f4\u01f5\7\61"+ - "\2\2\u01f5\u01f6\7\3\2\2\u01f6\u01fb\5<\37\2\u01f7\u01f8\7\5\2\2\u01f8"+ - "\u01fa\5<\37\2\u01f9\u01f7\3\2\2\2\u01fa\u01fd\3\2\2\2\u01fb\u01f9\3\2"+ - "\2\2\u01fb\u01fc\3\2\2\2\u01fc\u01fe\3\2\2\2\u01fd\u01fb\3\2\2\2\u01fe"+ - "\u01ff\7\4\2\2\u01ff\u0218\3\2\2\2\u0200\u0202\7A\2\2\u0201\u0200\3\2"+ - "\2\2\u0201\u0202\3\2\2\2\u0202\u0203\3\2\2\2\u0203\u0204\7\61\2\2\u0204"+ - "\u0205\7\3\2\2\u0205\u0206\5\b\5\2\u0206\u0207\7\4\2\2\u0207\u0218\3\2"+ - "\2\2\u0208\u020a\7A\2\2\u0209\u0208\3\2\2\2\u0209\u020a\3\2\2\2\u020a"+ - "\u020b\3\2\2\2\u020b\u020c\78\2\2\u020c\u0218\58\35\2\u020d\u020f\7A\2"+ - "\2\u020e\u020d\3\2\2\2\u020e\u020f\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0211"+ - "\7M\2\2\u0211\u0218\5j\66\2\u0212\u0214\7\64\2\2\u0213\u0215\7A\2\2\u0214"+ - "\u0213\3\2\2\2\u0214\u0215\3\2\2\2\u0215\u0216\3\2\2\2\u0216\u0218\7B"+ - "\2\2\u0217\u01ea\3\2\2\2\u0217\u01f2\3\2\2\2\u0217\u0201\3\2\2\2\u0217"+ - "\u0209\3\2\2\2\u0217\u020e\3\2\2\2\u0217\u0212\3\2\2\2\u0218\65\3\2\2"+ - "\2\u0219\u021a\78\2\2\u021a\u021b\58\35\2\u021b\67\3\2\2\2\u021c\u021e"+ - "\5j\66\2\u021d\u021f\5:\36\2\u021e\u021d\3\2\2\2\u021e\u021f\3\2\2\2\u021f"+ - "9\3\2\2\2\u0220\u0221\7!\2\2\u0221\u0227\5j\66\2\u0222\u0223\7d\2\2\u0223"+ - "\u0224\5j\66\2\u0224\u0225\7k\2\2\u0225\u0227\3\2\2\2\u0226\u0220\3\2"+ - "\2\2\u0226\u0222\3\2\2\2\u0227;\3\2\2\2\u0228\u0229\b\37\1\2\u0229\u022d"+ - "\5> \2\u022a\u022b\t\7\2\2\u022b\u022d\5<\37\6\u022c\u0228\3\2\2\2\u022c"+ - "\u022a\3\2\2\2\u022d\u023a\3\2\2\2\u022e\u022f\f\5\2\2\u022f\u0230\t\f"+ - "\2\2\u0230\u0239\5<\37\6\u0231\u0232\f\4\2\2\u0232\u0233\t\7\2\2\u0233"+ - "\u0239\5<\37\5\u0234\u0235\f\3\2\2\u0235\u0236\5T+\2\u0236\u0237\5<\37"+ - "\4\u0237\u0239\3\2\2\2\u0238\u022e\3\2\2\2\u0238\u0231\3\2\2\2\u0238\u0234"+ - "\3\2\2\2\u0239\u023c\3\2\2\2\u023a\u0238\3\2\2\2\u023a\u023b\3\2\2\2\u023b"+ - "=\3\2\2\2\u023c\u023a\3\2\2\2\u023d\u023e\b \1\2\u023e\u0262\5B\"\2\u023f"+ - "\u0262\5H%\2\u0240\u0262\5@!\2\u0241\u0262\5R*\2\u0242\u0243\5^\60\2\u0243"+ - "\u0244\7z\2\2\u0244\u0246\3\2\2\2\u0245\u0242\3\2\2\2\u0245\u0246\3\2"+ - "\2\2\u0246\u0247\3\2\2\2\u0247\u0262\7u\2\2\u0248\u0262\5L\'\2\u0249\u024a"+ - "\7\3\2\2\u024a\u024b\5\b\5\2\u024b\u024c\7\4\2\2\u024c\u0262\3\2\2\2\u024d"+ - "\u0262\5^\60\2\u024e\u024f\7\3\2\2\u024f\u0250\5,\27\2\u0250\u0251\7\4"+ - "\2\2\u0251\u0262\3\2\2\2\u0252\u0254\7\20\2\2\u0253\u0255\5.\30\2\u0254"+ - "\u0253\3\2\2\2\u0254\u0255\3\2\2\2\u0255\u0257\3\2\2\2\u0256\u0258\5l"+ - "\67\2\u0257\u0256\3\2\2\2\u0258\u0259\3\2\2\2\u0259\u0257\3\2\2\2\u0259"+ - 
"\u025a\3\2\2\2\u025a\u025d\3\2\2\2\u025b\u025c\7\37\2\2\u025c\u025e\5"+ - ".\30\2\u025d\u025b\3\2\2\2\u025d\u025e\3\2\2\2\u025e\u025f\3\2\2\2\u025f"+ - "\u0260\7 \2\2\u0260\u0262\3\2\2\2\u0261\u023d\3\2\2\2\u0261\u023f\3\2"+ - "\2\2\u0261\u0240\3\2\2\2\u0261\u0241\3\2\2\2\u0261\u0245\3\2\2\2\u0261"+ - "\u0248\3\2\2\2\u0261\u0249\3\2\2\2\u0261\u024d\3\2\2\2\u0261\u024e\3\2"+ - "\2\2\u0261\u0252\3\2\2\2\u0262\u0268\3\2\2\2\u0263\u0264\f\f\2\2\u0264"+ - "\u0265\7x\2\2\u0265\u0267\5\\/\2\u0266\u0263\3\2\2\2\u0267\u026a\3\2\2"+ - "\2\u0268\u0266\3\2\2\2\u0268\u0269\3\2\2\2\u0269?\3\2\2\2\u026a\u0268"+ - "\3\2\2\2\u026b\u026f\7\30\2\2\u026c\u026f\7\26\2\2\u026d\u026f\7\27\2"+ - "\2\u026e\u026b\3\2\2\2\u026e\u026c\3\2\2\2\u026e\u026d\3\2\2\2\u026fA"+ - "\3\2\2\2\u0270\u027b\5D#\2\u0271\u0272\7e\2\2\u0272\u0273\5D#\2\u0273"+ - "\u0274\7k\2\2\u0274\u027b\3\2\2\2\u0275\u027b\5F$\2\u0276\u0277\7e\2\2"+ - "\u0277\u0278\5F$\2\u0278\u0279\7k\2\2\u0279\u027b\3\2\2\2\u027a\u0270"+ - "\3\2\2\2\u027a\u0271\3\2\2\2\u027a\u0275\3\2\2\2\u027a\u0276\3\2\2\2\u027b"+ - "C\3\2\2\2\u027c\u027d\7\21\2\2\u027d\u027e\7\3\2\2\u027e\u027f\5,\27\2"+ - "\u027f\u0280\7\f\2\2\u0280\u0281\5\\/\2\u0281\u0282\7\4\2\2\u0282E\3\2"+ - "\2\2\u0283\u0284\7\25\2\2\u0284\u0285\7\3\2\2\u0285\u0286\5,\27\2\u0286"+ - "\u0287\7\5\2\2\u0287\u0288\5\\/\2\u0288\u0289\7\4\2\2\u0289G\3\2\2\2\u028a"+ - "\u0290\5J&\2\u028b\u028c\7e\2\2\u028c\u028d\5J&\2\u028d\u028e\7k\2\2\u028e"+ - "\u0290\3\2\2\2\u028f\u028a\3\2\2\2\u028f\u028b\3\2\2\2\u0290I\3\2\2\2"+ - "\u0291\u0292\7%\2\2\u0292\u0293\7\3\2\2\u0293\u0294\5`\61\2\u0294\u0295"+ - "\7)\2\2\u0295\u0296\5<\37\2\u0296\u0297\7\4\2\2\u0297K\3\2\2\2\u0298\u029e"+ - "\5N(\2\u0299\u029a\7e\2\2\u029a\u029b\5N(\2\u029b\u029c\7k\2\2\u029c\u029e"+ - "\3\2\2\2\u029d\u0298\3\2\2\2\u029d\u0299\3\2\2\2\u029eM\3\2\2\2\u029f"+ - "\u02a0\5P)\2\u02a0\u02ac\7\3\2\2\u02a1\u02a3\5\36\20\2\u02a2\u02a1\3\2"+ - "\2\2\u02a2\u02a3\3\2\2\2\u02a3\u02a4\3\2\2\2\u02a4\u02a9\5,\27\2\u02a5"+ - "\u02a6\7\5\2\2\u02a6\u02a8\5,\27\2\u02a7\u02a5\3\2\2\2\u02a8\u02ab\3\2"+ - "\2\2\u02a9\u02a7\3\2\2\2\u02a9\u02aa\3\2\2\2\u02aa\u02ad\3\2\2\2\u02ab"+ - "\u02a9\3\2\2\2\u02ac\u02a2\3\2\2\2\u02ac\u02ad\3\2\2\2\u02ad\u02ae\3\2"+ - "\2\2\u02ae\u02af\7\4\2\2\u02afO\3\2\2\2\u02b0\u02b4\7\67\2\2\u02b1\u02b4"+ - "\7L\2\2\u02b2\u02b4\5`\61\2\u02b3\u02b0\3\2\2\2\u02b3\u02b1\3\2\2\2\u02b3"+ - "\u02b2\3\2\2\2\u02b4Q\3\2\2\2\u02b5\u02d0\7B\2\2\u02b6\u02d0\5X-\2\u02b7"+ - "\u02d0\5h\65\2\u02b8\u02d0\5V,\2\u02b9\u02bb\7|\2\2\u02ba\u02b9\3\2\2"+ - "\2\u02bb\u02bc\3\2\2\2\u02bc\u02ba\3\2\2\2\u02bc\u02bd\3\2\2\2\u02bd\u02d0"+ - "\3\2\2\2\u02be\u02d0\7{\2\2\u02bf\u02c0\7g\2\2\u02c0\u02c1\5j\66\2\u02c1"+ - "\u02c2\7k\2\2\u02c2\u02d0\3\2\2\2\u02c3\u02c4\7h\2\2\u02c4\u02c5\5j\66"+ - "\2\u02c5\u02c6\7k\2\2\u02c6\u02d0\3\2\2\2\u02c7\u02c8\7i\2\2\u02c8\u02c9"+ - "\5j\66\2\u02c9\u02ca\7k\2\2\u02ca\u02d0\3\2\2\2\u02cb\u02cc\7j\2\2\u02cc"+ - "\u02cd\5j\66\2\u02cd\u02ce\7k\2\2\u02ce\u02d0\3\2\2\2\u02cf\u02b5\3\2"+ - "\2\2\u02cf\u02b6\3\2\2\2\u02cf\u02b7\3\2\2\2\u02cf\u02b8\3\2\2\2\u02cf"+ - "\u02ba\3\2\2\2\u02cf\u02be\3\2\2\2\u02cf\u02bf\3\2\2\2\u02cf\u02c3\3\2"+ - "\2\2\u02cf\u02c7\3\2\2\2\u02cf\u02cb\3\2\2\2\u02d0S\3\2\2\2\u02d1\u02d2"+ - "\t\r\2\2\u02d2U\3\2\2\2\u02d3\u02d4\t\16\2\2\u02d4W\3\2\2\2\u02d5\u02d7"+ - "\7\63\2\2\u02d6\u02d8\t\7\2\2\u02d7\u02d6\3\2\2\2\u02d7\u02d8\3\2\2\2"+ - "\u02d8\u02db\3\2\2\2\u02d9\u02dc\5h\65\2\u02da\u02dc\5j\66\2\u02db\u02d9"+ - "\3\2\2\2\u02db\u02da\3\2\2\2\u02dc\u02dd\3\2\2\2\u02dd\u02e0\5Z.\2\u02de"+ - 
"\u02df\7Z\2\2\u02df\u02e1\5Z.\2\u02e0\u02de\3\2\2\2\u02e0\u02e1\3\2\2"+ - "\2\u02e1Y\3\2\2\2\u02e2\u02e3\t\17\2\2\u02e3[\3\2\2\2\u02e4\u02e5\5`\61"+ - "\2\u02e5]\3\2\2\2\u02e6\u02e7\5`\61\2\u02e7\u02e8\7z\2\2\u02e8\u02ea\3"+ - "\2\2\2\u02e9\u02e6\3\2\2\2\u02ea\u02ed\3\2\2\2\u02eb\u02e9\3\2\2\2\u02eb"+ - "\u02ec\3\2\2\2\u02ec\u02ee\3\2\2\2\u02ed\u02eb\3\2\2\2\u02ee\u02ef\5`"+ - "\61\2\u02ef_\3\2\2\2\u02f0\u02f3\5d\63\2\u02f1\u02f3\5f\64\2\u02f2\u02f0"+ - "\3\2\2\2\u02f2\u02f1\3\2\2\2\u02f3a\3\2\2\2\u02f4\u02f5\5`\61\2\u02f5"+ - "\u02f6\7\6\2\2\u02f6\u02f8\3\2\2\2\u02f7\u02f4\3\2\2\2\u02f7\u02f8\3\2"+ - "\2\2\u02f8\u02f9\3\2\2\2\u02f9\u0301\7\u0081\2\2\u02fa\u02fb\5`\61\2\u02fb"+ - "\u02fc\7\6\2\2\u02fc\u02fe\3\2\2\2\u02fd\u02fa\3\2\2\2\u02fd\u02fe\3\2"+ - "\2\2\u02fe\u02ff\3\2\2\2\u02ff\u0301\5`\61\2\u0300\u02f7\3\2\2\2\u0300"+ - "\u02fd\3\2\2\2\u0301c\3\2\2\2\u0302\u0305\7\u0082\2\2\u0303\u0305\7\u0083"+ - "\2\2\u0304\u0302\3\2\2\2\u0304\u0303\3\2\2\2\u0305e\3\2\2\2\u0306\u030a"+ - "\7\177\2\2\u0307\u030a\5n8\2\u0308\u030a\7\u0080\2\2\u0309\u0306\3\2\2"+ - "\2\u0309\u0307\3\2\2\2\u0309\u0308\3\2\2\2\u030ag\3\2\2\2\u030b\u030e"+ - "\7~\2\2\u030c\u030e\7}\2\2\u030d\u030b\3\2\2\2\u030d\u030c\3\2\2\2\u030e"+ - "i\3\2\2\2\u030f\u0310\t\20\2\2\u0310k\3\2\2\2\u0311\u0312\7_\2\2\u0312"+ - "\u0313\5,\27\2\u0313\u0314\7X\2\2\u0314\u0315\5,\27\2\u0315m\3\2\2\2\u0316"+ - "\u0317\t\21\2\2\u0317o\3\2\2\2k\177\u0081\u0085\u008e\u0090\u0094\u009b"+ - "\u00a2\u00a7\u00ac\u00b4\u00b8\u00c0\u00c3\u00c9\u00ce\u00d1\u00d6\u00d9"+ - "\u00db\u00e3\u00e6\u00f2\u00f5\u00f8\u00ff\u0106\u010a\u010e\u0112\u0119"+ - "\u011d\u0121\u0126\u012a\u0132\u0136\u013d\u0148\u014b\u014f\u015b\u015e"+ - "\u0164\u016b\u0172\u0175\u0179\u017d\u0181\u0183\u018e\u0193\u0197\u019a"+ - "\u01a0\u01a3\u01a9\u01ac\u01ae\u01d1\u01d9\u01db\u01e2\u01e7\u01ea\u01f2"+ - "\u01fb\u0201\u0209\u020e\u0214\u0217\u021e\u0226\u022c\u0238\u023a\u0245"+ - "\u0254\u0259\u025d\u0261\u0268\u026e\u027a\u028f\u029d\u02a2\u02a9\u02ac"+ - "\u02b3\u02bc\u02cf\u02d7\u02db\u02e0\u02eb\u02f2\u02f7\u02fd\u0300\u0304"+ - "\u0309\u030d"; + "\2\2\u0094\u0095\3\2\2\2\u0095\u0096\3\2\2\2\u0096\u00e8\5\6\4\2\u0097"+ + "\u0098\7U\2\2\u0098\u009b\7X\2\2\u0099\u009a\7\63\2\2\u009a\u009c\7*\2"+ + "\2\u009b\u0099\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009f\3\2\2\2\u009d\u00a0"+ + "\5\66\34\2\u009e\u00a0\5b\62\2\u009f\u009d\3\2\2\2\u009f\u009e\3\2\2\2"+ + "\u009f\u00a0\3\2\2\2\u00a0\u00e8\3\2\2\2\u00a1\u00a2\7U\2\2\u00a2\u00a5"+ + "\7\24\2\2\u00a3\u00a4\7\63\2\2\u00a4\u00a6\7*\2\2\u00a5\u00a3\3\2\2\2"+ + "\u00a5\u00a6\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7\u00aa\t\5\2\2\u00a8\u00ab"+ + "\5\66\34\2\u00a9\u00ab\5b\62\2\u00aa\u00a8\3\2\2\2\u00aa\u00a9\3\2\2\2"+ + "\u00ab\u00e8\3\2\2\2\u00ac\u00af\t\6\2\2\u00ad\u00ae\7\63\2\2\u00ae\u00b0"+ + "\7*\2\2\u00af\u00ad\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0\u00b3\3\2\2\2\u00b1"+ + "\u00b4\5\66\34\2\u00b2\u00b4\5b\62\2\u00b3\u00b1\3\2\2\2\u00b3\u00b2\3"+ + "\2\2\2\u00b4\u00e8\3\2\2\2\u00b5\u00b6\7U\2\2\u00b6\u00b8\7,\2\2\u00b7"+ + "\u00b9\5\66\34\2\u00b8\u00b7\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9\u00e8\3"+ + "\2\2\2\u00ba\u00bb\7U\2\2\u00bb\u00e8\7Q\2\2\u00bc\u00bd\7V\2\2\u00bd"+ + "\u00c0\7X\2\2\u00be\u00bf\7\22\2\2\u00bf\u00c1\5\66\34\2\u00c0\u00be\3"+ + "\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00c4\3\2\2\2\u00c2\u00c5\5\66\34\2\u00c3"+ + "\u00c5\5b\62\2\u00c4\u00c2\3\2\2\2\u00c4\u00c3\3\2\2\2\u00c4\u00c5\3\2"+ + "\2\2\u00c5\u00cf\3\2\2\2\u00c6\u00c7\7]\2\2\u00c7\u00cc\5j\66\2\u00c8"+ + 
"\u00c9\7\5\2\2\u00c9\u00cb\5j\66\2\u00ca\u00c8\3\2\2\2\u00cb\u00ce\3\2"+ + "\2\2\u00cc\u00ca\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00d0\3\2\2\2\u00ce"+ + "\u00cc\3\2\2\2\u00cf\u00c6\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\u00e8\3\2"+ + "\2\2\u00d1\u00d2\7V\2\2\u00d2\u00d5\7\24\2\2\u00d3\u00d4\7\22\2\2\u00d4"+ + "\u00d6\5j\66\2\u00d5\u00d3\3\2\2\2\u00d5\u00d6\3\2\2\2\u00d6\u00da\3\2"+ + "\2\2\u00d7\u00d8\7W\2\2\u00d8\u00db\5\66\34\2\u00d9\u00db\5b\62\2\u00da"+ + "\u00d7\3\2\2\2\u00da\u00d9\3\2\2\2\u00da\u00db\3\2\2\2\u00db\u00dd\3\2"+ + "\2\2\u00dc\u00de\5\66\34\2\u00dd\u00dc\3\2\2\2\u00dd\u00de\3\2\2\2\u00de"+ + "\u00e8\3\2\2\2\u00df\u00e0\7V\2\2\u00e0\u00e5\7^\2\2\u00e1\u00e3\t\7\2"+ + "\2\u00e2\u00e1\3\2\2\2\u00e2\u00e3\3\2\2\2\u00e3\u00e4\3\2\2\2\u00e4\u00e6"+ + "\5h\65\2\u00e5\u00e2\3\2\2\2\u00e5\u00e6\3\2\2\2\u00e6\u00e8\3\2\2\2\u00e7"+ + "v\3\2\2\2\u00e7w\3\2\2\2\u00e7\u0088\3\2\2\2\u00e7\u0097\3\2\2\2\u00e7"+ + "\u00a1\3\2\2\2\u00e7\u00ac\3\2\2\2\u00e7\u00b5\3\2\2\2\u00e7\u00ba\3\2"+ + "\2\2\u00e7\u00bc\3\2\2\2\u00e7\u00d1\3\2\2\2\u00e7\u00df\3\2\2\2\u00e8"+ + "\7\3\2\2\2\u00e9\u00ea\7c\2\2\u00ea\u00ef\5\34\17\2\u00eb\u00ec\7\5\2"+ + "\2\u00ec\u00ee\5\34\17\2\u00ed\u00eb\3\2\2\2\u00ee\u00f1\3\2\2\2\u00ef"+ + "\u00ed\3\2\2\2\u00ef\u00f0\3\2\2\2\u00f0\u00f3\3\2\2\2\u00f1\u00ef\3\2"+ + "\2\2\u00f2\u00e9\3\2\2\2\u00f2\u00f3\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4"+ + "\u00f5\5\n\6\2\u00f5\t\3\2\2\2\u00f6\u0101\5\16\b\2\u00f7\u00f8\7I\2\2"+ + "\u00f8\u00f9\7\17\2\2\u00f9\u00fe\5\20\t\2\u00fa\u00fb\7\5\2\2\u00fb\u00fd"+ + "\5\20\t\2\u00fc\u00fa\3\2\2\2\u00fd\u0100\3\2\2\2\u00fe\u00fc\3\2\2\2"+ + "\u00fe\u00ff\3\2\2\2\u00ff\u0102\3\2\2\2\u0100\u00fe\3\2\2\2\u0101\u00f7"+ + "\3\2\2\2\u0101\u0102\3\2\2\2\u0102\u0104\3\2\2\2\u0103\u0105\5\f\7\2\u0104"+ + "\u0103\3\2\2\2\u0104\u0105\3\2\2\2\u0105\13\3\2\2\2\u0106\u0107\7;\2\2"+ + "\u0107\u010c\t\b\2\2\u0108\u0109\7h\2\2\u0109\u010a\t\b\2\2\u010a\u010c"+ + "\7m\2\2\u010b\u0106\3\2\2\2\u010b\u0108\3\2\2\2\u010c\r\3\2\2\2\u010d"+ + "\u0113\5\22\n\2\u010e\u010f\7\3\2\2\u010f\u0110\5\n\6\2\u0110\u0111\7"+ + "\4\2\2\u0111\u0113\3\2\2\2\u0112\u010d\3\2\2\2\u0112\u010e\3\2\2\2\u0113"+ + "\17\3\2\2\2\u0114\u0116\5,\27\2\u0115\u0117\t\t\2\2\u0116\u0115\3\2\2"+ + "\2\u0116\u0117\3\2\2\2\u0117\u011a\3\2\2\2\u0118\u0119\7E\2\2\u0119\u011b"+ + "\t\n\2\2\u011a\u0118\3\2\2\2\u011a\u011b\3\2\2\2\u011b\21\3\2\2\2\u011c"+ + "\u011e\7T\2\2\u011d\u011f\5\36\20\2\u011e\u011d\3\2\2\2\u011e\u011f\3"+ + "\2\2\2\u011f\u0120\3\2\2\2\u0120\u0125\5 \21\2\u0121\u0122\7\5\2\2\u0122"+ + "\u0124\5 \21\2\u0123\u0121\3\2\2\2\u0124\u0127\3\2\2\2\u0125\u0123\3\2"+ + "\2\2\u0125\u0126\3\2\2\2\u0126\u0129\3\2\2\2\u0127\u0125\3\2\2\2\u0128"+ + "\u012a\5\24\13\2\u0129\u0128\3\2\2\2\u0129\u012a\3\2\2\2\u012a\u012d\3"+ + "\2\2\2\u012b\u012c\7b\2\2\u012c\u012e\5.\30\2\u012d\u012b\3\2\2\2\u012d"+ + "\u012e\3\2\2\2\u012e\u0132\3\2\2\2\u012f\u0130\7.\2\2\u0130\u0131\7\17"+ + "\2\2\u0131\u0133\5\26\f\2\u0132\u012f\3\2\2\2\u0132\u0133\3\2\2\2\u0133"+ + "\u0136\3\2\2\2\u0134\u0135\7/\2\2\u0135\u0137\5.\30\2\u0136\u0134\3\2"+ + "\2\2\u0136\u0137\3\2\2\2\u0137\23\3\2\2\2\u0138\u0139\7)\2\2\u0139\u013e"+ + "\5\"\22\2\u013a\u013b\7\5\2\2\u013b\u013d\5\"\22\2\u013c\u013a\3\2\2\2"+ + "\u013d\u0140\3\2\2\2\u013e\u013c\3\2\2\2\u013e\u013f\3\2\2\2\u013f\25"+ + "\3\2\2\2\u0140\u013e\3\2\2\2\u0141\u0143\5\36\20\2\u0142\u0141\3\2\2\2"+ + "\u0142\u0143\3\2\2\2\u0143\u0144\3\2\2\2\u0144\u0149\5\30\r\2\u0145\u0146"+ + "\7\5\2\2\u0146\u0148\5\30\r\2\u0147\u0145\3\2\2\2\u0148\u014b\3\2\2\2"+ + 
"\u0149\u0147\3\2\2\2\u0149\u014a\3\2\2\2\u014a\27\3\2\2\2\u014b\u0149"+ + "\3\2\2\2\u014c\u014d\5\32\16\2\u014d\31\3\2\2\2\u014e\u0157\7\3\2\2\u014f"+ + "\u0154\5,\27\2\u0150\u0151\7\5\2\2\u0151\u0153\5,\27\2\u0152\u0150\3\2"+ + "\2\2\u0153\u0156\3\2\2\2\u0154\u0152\3\2\2\2\u0154\u0155\3\2\2\2\u0155"+ + "\u0158\3\2\2\2\u0156\u0154\3\2\2\2\u0157\u014f\3\2\2\2\u0157\u0158\3\2"+ + "\2\2\u0158\u0159\3\2\2\2\u0159\u015c\7\4\2\2\u015a\u015c\5,\27\2\u015b"+ + "\u014e\3\2\2\2\u015b\u015a\3\2\2\2\u015c\33\3\2\2\2\u015d\u015e\5`\61"+ + "\2\u015e\u015f\7\f\2\2\u015f\u0160\7\3\2\2\u0160\u0161\5\n\6\2\u0161\u0162"+ + "\7\4\2\2\u0162\35\3\2\2\2\u0163\u0164\t\13\2\2\u0164\37\3\2\2\2\u0165"+ + "\u016a\5,\27\2\u0166\u0168\7\f\2\2\u0167\u0166\3\2\2\2\u0167\u0168\3\2"+ + "\2\2\u0168\u0169\3\2\2\2\u0169\u016b\5`\61\2\u016a\u0167\3\2\2\2\u016a"+ + "\u016b\3\2\2\2\u016b!\3\2\2\2\u016c\u0170\5*\26\2\u016d\u016f\5$\23\2"+ + "\u016e\u016d\3\2\2\2\u016f\u0172\3\2\2\2\u0170\u016e\3\2\2\2\u0170\u0171"+ + "\3\2\2\2\u0171#\3\2\2\2\u0172\u0170\3\2\2\2\u0173\u0174\5&\24\2\u0174"+ + "\u0175\7\67\2\2\u0175\u0177\5*\26\2\u0176\u0178\5(\25\2\u0177\u0176\3"+ + "\2\2\2\u0177\u0178\3\2\2\2\u0178\u017f\3\2\2\2\u0179\u017a\7B\2\2\u017a"+ + "\u017b\5&\24\2\u017b\u017c\7\67\2\2\u017c\u017d\5*\26\2\u017d\u017f\3"+ + "\2\2\2\u017e\u0173\3\2\2\2\u017e\u0179\3\2\2\2\u017f%\3\2\2\2\u0180\u0182"+ + "\7\64\2\2\u0181\u0180\3\2\2\2\u0181\u0182\3\2\2\2\u0182\u0190\3\2\2\2"+ + "\u0183\u0185\79\2\2\u0184\u0186\7J\2\2\u0185\u0184\3\2\2\2\u0185\u0186"+ + "\3\2\2\2\u0186\u0190\3\2\2\2\u0187\u0189\7N\2\2\u0188\u018a\7J\2\2\u0189"+ + "\u0188\3\2\2\2\u0189\u018a\3\2\2\2\u018a\u0190\3\2\2\2\u018b\u018d\7+"+ + "\2\2\u018c\u018e\7J\2\2\u018d\u018c\3\2\2\2\u018d\u018e\3\2\2\2\u018e"+ + "\u0190\3\2\2\2\u018f\u0181\3\2\2\2\u018f\u0183\3\2\2\2\u018f\u0187\3\2"+ + "\2\2\u018f\u018b\3\2\2\2\u0190\'\3\2\2\2\u0191\u0192\7F\2\2\u0192\u01a0"+ + "\5.\30\2\u0193\u0194\7_\2\2\u0194\u0195\7\3\2\2\u0195\u019a\5`\61\2\u0196"+ + "\u0197\7\5\2\2\u0197\u0199\5`\61\2\u0198\u0196\3\2\2\2\u0199\u019c\3\2"+ + "\2\2\u019a\u0198\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u019d\3\2\2\2\u019c"+ + "\u019a\3\2\2\2\u019d\u019e\7\4\2\2\u019e\u01a0\3\2\2\2\u019f\u0191\3\2"+ + "\2\2\u019f\u0193\3\2\2\2\u01a0)\3\2\2\2\u01a1\u01a3\7*\2\2\u01a2\u01a1"+ + "\3\2\2\2\u01a2\u01a3\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01a9\5b\62\2\u01a5"+ + "\u01a7\7\f\2\2\u01a6\u01a5\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a8\3\2"+ + "\2\2\u01a8\u01aa\5^\60\2\u01a9\u01a6\3\2\2\2\u01a9\u01aa\3\2\2\2\u01aa"+ + "\u01be\3\2\2\2\u01ab\u01ac\7\3\2\2\u01ac\u01ad\5\n\6\2\u01ad\u01b2\7\4"+ + "\2\2\u01ae\u01b0\7\f\2\2\u01af\u01ae\3\2\2\2\u01af\u01b0\3\2\2\2\u01b0"+ + "\u01b1\3\2\2\2\u01b1\u01b3\5^\60\2\u01b2\u01af\3\2\2\2\u01b2\u01b3\3\2"+ + "\2\2\u01b3\u01be\3\2\2\2\u01b4\u01b5\7\3\2\2\u01b5\u01b6\5\"\22\2\u01b6"+ + "\u01bb\7\4\2\2\u01b7\u01b9\7\f\2\2\u01b8\u01b7\3\2\2\2\u01b8\u01b9\3\2"+ + "\2\2\u01b9\u01ba\3\2\2\2\u01ba\u01bc\5^\60\2\u01bb\u01b8\3\2\2\2\u01bb"+ + "\u01bc\3\2\2\2\u01bc\u01be\3\2\2\2\u01bd\u01a2\3\2\2\2\u01bd\u01ab\3\2"+ + "\2\2\u01bd\u01b4\3\2\2\2\u01be+\3\2\2\2\u01bf\u01c0\5.\30\2\u01c0-\3\2"+ + "\2\2\u01c1\u01c2\b\30\1\2\u01c2\u01c3\7C\2\2\u01c3\u01e1\5.\30\n\u01c4"+ + "\u01c5\7#\2\2\u01c5\u01c6\7\3\2\2\u01c6\u01c7\5\b\5\2\u01c7\u01c8\7\4"+ + "\2\2\u01c8\u01e1\3\2\2\2\u01c9\u01ca\7P\2\2\u01ca\u01cb\7\3\2\2\u01cb"+ + "\u01cc\5j\66\2\u01cc\u01cd\5\60\31\2\u01cd\u01ce\7\4\2\2\u01ce\u01e1\3"+ + "\2\2\2\u01cf\u01d0\7=\2\2\u01d0\u01d1\7\3\2\2\u01d1\u01d2\5^\60\2\u01d2"+ + 
"\u01d3\7\5\2\2\u01d3\u01d4\5j\66\2\u01d4\u01d5\5\60\31\2\u01d5\u01d6\7"+ + "\4\2\2\u01d6\u01e1\3\2\2\2\u01d7\u01d8\7=\2\2\u01d8\u01d9\7\3\2\2\u01d9"+ + "\u01da\5j\66\2\u01da\u01db\7\5\2\2\u01db\u01dc\5j\66\2\u01dc\u01dd\5\60"+ + "\31\2\u01dd\u01de\7\4\2\2\u01de\u01e1\3\2\2\2\u01df\u01e1\5\62\32\2\u01e0"+ + "\u01c1\3\2\2\2\u01e0\u01c4\3\2\2\2\u01e0\u01c9\3\2\2\2\u01e0\u01cf\3\2"+ + "\2\2\u01e0\u01d7\3\2\2\2\u01e0\u01df\3\2\2\2\u01e1\u01ea\3\2\2\2\u01e2"+ + "\u01e3\f\4\2\2\u01e3\u01e4\7\n\2\2\u01e4\u01e9\5.\30\5\u01e5\u01e6\f\3"+ + "\2\2\u01e6\u01e7\7H\2\2\u01e7\u01e9\5.\30\4\u01e8\u01e2\3\2\2\2\u01e8"+ + "\u01e5\3\2\2\2\u01e9\u01ec\3\2\2\2\u01ea\u01e8\3\2\2\2\u01ea\u01eb\3\2"+ + "\2\2\u01eb/\3\2\2\2\u01ec\u01ea\3\2\2\2\u01ed\u01ee\7\5\2\2\u01ee\u01f0"+ + "\5j\66\2\u01ef\u01ed\3\2\2\2\u01f0\u01f3\3\2\2\2\u01f1\u01ef\3\2\2\2\u01f1"+ + "\u01f2\3\2\2\2\u01f2\61\3\2\2\2\u01f3\u01f1\3\2\2\2\u01f4\u01f6\5<\37"+ + "\2\u01f5\u01f7\5\64\33\2\u01f6\u01f5\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f7"+ + "\63\3\2\2\2\u01f8\u01fa\7C\2\2\u01f9\u01f8\3\2\2\2\u01f9\u01fa\3\2\2\2"+ + "\u01fa\u01fb\3\2\2\2\u01fb\u01fc\7\16\2\2\u01fc\u01fd\5<\37\2\u01fd\u01fe"+ + "\7\n\2\2\u01fe\u01ff\5<\37\2\u01ff\u0227\3\2\2\2\u0200\u0202\7C\2\2\u0201"+ + "\u0200\3\2\2\2\u0201\u0202\3\2\2\2\u0202\u0203\3\2\2\2\u0203\u0204\7\62"+ + "\2\2\u0204\u0205\7\3\2\2\u0205\u020a\5<\37\2\u0206\u0207\7\5\2\2\u0207"+ + "\u0209\5<\37\2\u0208\u0206\3\2\2\2\u0209\u020c\3\2\2\2\u020a\u0208\3\2"+ + "\2\2\u020a\u020b\3\2\2\2\u020b\u020d\3\2\2\2\u020c\u020a\3\2\2\2\u020d"+ + "\u020e\7\4\2\2\u020e\u0227\3\2\2\2\u020f\u0211\7C\2\2\u0210\u020f\3\2"+ + "\2\2\u0210\u0211\3\2\2\2\u0211\u0212\3\2\2\2\u0212\u0213\7\62\2\2\u0213"+ + "\u0214\7\3\2\2\u0214\u0215\5\b\5\2\u0215\u0216\7\4\2\2\u0216\u0227\3\2"+ + "\2\2\u0217\u0219\7C\2\2\u0218\u0217\3\2\2\2\u0218\u0219\3\2\2\2\u0219"+ + "\u021a\3\2\2\2\u021a\u021b\7:\2\2\u021b\u0227\58\35\2\u021c\u021e\7C\2"+ + "\2\u021d\u021c\3\2\2\2\u021d\u021e\3\2\2\2\u021e\u021f\3\2\2\2\u021f\u0220"+ + "\7O\2\2\u0220\u0227\5j\66\2\u0221\u0223\7\66\2\2\u0222\u0224\7C\2\2\u0223"+ + "\u0222\3\2\2\2\u0223\u0224\3\2\2\2\u0224\u0225\3\2\2\2\u0225\u0227\7D"+ + "\2\2\u0226\u01f9\3\2\2\2\u0226\u0201\3\2\2\2\u0226\u0210\3\2\2\2\u0226"+ + "\u0218\3\2\2\2\u0226\u021d\3\2\2\2\u0226\u0221\3\2\2\2\u0227\65\3\2\2"+ + "\2\u0228\u0229\7:\2\2\u0229\u022a\58\35\2\u022a\67\3\2\2\2\u022b\u022d"+ + "\5j\66\2\u022c\u022e\5:\36\2\u022d\u022c\3\2\2\2\u022d\u022e\3\2\2\2\u022e"+ + "9\3\2\2\2\u022f\u0230\7!\2\2\u0230\u0236\5j\66\2\u0231\u0232\7f\2\2\u0232"+ + "\u0233\5j\66\2\u0233\u0234\7m\2\2\u0234\u0236\3\2\2\2\u0235\u022f\3\2"+ + "\2\2\u0235\u0231\3\2\2\2\u0236;\3\2\2\2\u0237\u0238\b\37\1\2\u0238\u023c"+ + "\5> \2\u0239\u023a\t\7\2\2\u023a\u023c\5<\37\6\u023b\u0237\3\2\2\2\u023b"+ + "\u0239\3\2\2\2\u023c\u0249\3\2\2\2\u023d\u023e\f\5\2\2\u023e\u023f\t\f"+ + "\2\2\u023f\u0248\5<\37\6\u0240\u0241\f\4\2\2\u0241\u0242\t\7\2\2\u0242"+ + "\u0248\5<\37\5\u0243\u0244\f\3\2\2\u0244\u0245\5T+\2\u0245\u0246\5<\37"+ + "\4\u0246\u0248\3\2\2\2\u0247\u023d\3\2\2\2\u0247\u0240\3\2\2\2\u0247\u0243"+ + "\3\2\2\2\u0248\u024b\3\2\2\2\u0249\u0247\3\2\2\2\u0249\u024a\3\2\2\2\u024a"+ + "=\3\2\2\2\u024b\u0249\3\2\2\2\u024c\u024d\b \1\2\u024d\u0271\5B\"\2\u024e"+ + "\u0271\5H%\2\u024f\u0271\5@!\2\u0250\u0271\5R*\2\u0251\u0252\5^\60\2\u0252"+ + "\u0253\7|\2\2\u0253\u0255\3\2\2\2\u0254\u0251\3\2\2\2\u0254\u0255\3\2"+ + "\2\2\u0255\u0256\3\2\2\2\u0256\u0271\7w\2\2\u0257\u0271\5L\'\2\u0258\u0259"+ + "\7\3\2\2\u0259\u025a\5\b\5\2\u025a\u025b\7\4\2\2\u025b\u0271\3\2\2\2\u025c"+ + 
"\u0271\5^\60\2\u025d\u025e\7\3\2\2\u025e\u025f\5,\27\2\u025f\u0260\7\4"+ + "\2\2\u0260\u0271\3\2\2\2\u0261\u0263\7\20\2\2\u0262\u0264\5.\30\2\u0263"+ + "\u0262\3\2\2\2\u0263\u0264\3\2\2\2\u0264\u0266\3\2\2\2\u0265\u0267\5l"+ + "\67\2\u0266\u0265\3\2\2\2\u0267\u0268\3\2\2\2\u0268\u0266\3\2\2\2\u0268"+ + "\u0269\3\2\2\2\u0269\u026c\3\2\2\2\u026a\u026b\7\37\2\2\u026b\u026d\5"+ + ".\30\2\u026c\u026a\3\2\2\2\u026c\u026d\3\2\2\2\u026d\u026e\3\2\2\2\u026e"+ + "\u026f\7 \2\2\u026f\u0271\3\2\2\2\u0270\u024c\3\2\2\2\u0270\u024e\3\2"+ + "\2\2\u0270\u024f\3\2\2\2\u0270\u0250\3\2\2\2\u0270\u0254\3\2\2\2\u0270"+ + "\u0257\3\2\2\2\u0270\u0258\3\2\2\2\u0270\u025c\3\2\2\2\u0270\u025d\3\2"+ + "\2\2\u0270\u0261\3\2\2\2\u0271\u0277\3\2\2\2\u0272\u0273\f\f\2\2\u0273"+ + "\u0274\7z\2\2\u0274\u0276\5\\/\2\u0275\u0272\3\2\2\2\u0276\u0279\3\2\2"+ + "\2\u0277\u0275\3\2\2\2\u0277\u0278\3\2\2\2\u0278?\3\2\2\2\u0279\u0277"+ + "\3\2\2\2\u027a\u027e\7\30\2\2\u027b\u027e\7\26\2\2\u027c\u027e\7\27\2"+ + "\2\u027d\u027a\3\2\2\2\u027d\u027b\3\2\2\2\u027d\u027c\3\2\2\2\u027eA"+ + "\3\2\2\2\u027f\u028a\5D#\2\u0280\u0281\7g\2\2\u0281\u0282\5D#\2\u0282"+ + "\u0283\7m\2\2\u0283\u028a\3\2\2\2\u0284\u028a\5F$\2\u0285\u0286\7g\2\2"+ + "\u0286\u0287\5F$\2\u0287\u0288\7m\2\2\u0288\u028a\3\2\2\2\u0289\u027f"+ + "\3\2\2\2\u0289\u0280\3\2\2\2\u0289\u0284\3\2\2\2\u0289\u0285\3\2\2\2\u028a"+ + "C\3\2\2\2\u028b\u028c\7\21\2\2\u028c\u028d\7\3\2\2\u028d\u028e\5,\27\2"+ + "\u028e\u028f\7\f\2\2\u028f\u0290\5\\/\2\u0290\u0291\7\4\2\2\u0291E\3\2"+ + "\2\2\u0292\u0293\7\25\2\2\u0293\u0294\7\3\2\2\u0294\u0295\5,\27\2\u0295"+ + "\u0296\7\5\2\2\u0296\u0297\5\\/\2\u0297\u0298\7\4\2\2\u0298G\3\2\2\2\u0299"+ + "\u029f\5J&\2\u029a\u029b\7g\2\2\u029b\u029c\5J&\2\u029c\u029d\7m\2\2\u029d"+ + "\u029f\3\2\2\2\u029e\u0299\3\2\2\2\u029e\u029a\3\2\2\2\u029fI\3\2\2\2"+ + "\u02a0\u02a1\7%\2\2\u02a1\u02a2\7\3\2\2\u02a2\u02a3\5`\61\2\u02a3\u02a4"+ + "\7)\2\2\u02a4\u02a5\5<\37\2\u02a5\u02a6\7\4\2\2\u02a6K\3\2\2\2\u02a7\u02ad"+ + "\5N(\2\u02a8\u02a9\7g\2\2\u02a9\u02aa\5N(\2\u02aa\u02ab\7m\2\2\u02ab\u02ad"+ + "\3\2\2\2\u02ac\u02a7\3\2\2\2\u02ac\u02a8\3\2\2\2\u02adM\3\2\2\2\u02ae"+ + "\u02af\5P)\2\u02af\u02bb\7\3\2\2\u02b0\u02b2\5\36\20\2\u02b1\u02b0\3\2"+ + "\2\2\u02b1\u02b2\3\2\2\2\u02b2\u02b3\3\2\2\2\u02b3\u02b8\5,\27\2\u02b4"+ + "\u02b5\7\5\2\2\u02b5\u02b7\5,\27\2\u02b6\u02b4\3\2\2\2\u02b7\u02ba\3\2"+ + "\2\2\u02b8\u02b6\3\2\2\2\u02b8\u02b9\3\2\2\2\u02b9\u02bc\3\2\2\2\u02ba"+ + "\u02b8\3\2\2\2\u02bb\u02b1\3\2\2\2\u02bb\u02bc\3\2\2\2\u02bc\u02bd\3\2"+ + "\2\2\u02bd\u02be\7\4\2\2\u02beO\3\2\2\2\u02bf\u02c3\79\2\2\u02c0\u02c3"+ + "\7N\2\2\u02c1\u02c3\5`\61\2\u02c2\u02bf\3\2\2\2\u02c2\u02c0\3\2\2\2\u02c2"+ + "\u02c1\3\2\2\2\u02c3Q\3\2\2\2\u02c4\u02df\7D\2\2\u02c5\u02df\5X-\2\u02c6"+ + "\u02df\5h\65\2\u02c7\u02df\5V,\2\u02c8\u02ca\7~\2\2\u02c9\u02c8\3\2\2"+ + "\2\u02ca\u02cb\3\2\2\2\u02cb\u02c9\3\2\2\2\u02cb\u02cc\3\2\2\2\u02cc\u02df"+ + "\3\2\2\2\u02cd\u02df\7}\2\2\u02ce\u02cf\7i\2\2\u02cf\u02d0\5j\66\2\u02d0"+ + "\u02d1\7m\2\2\u02d1\u02df\3\2\2\2\u02d2\u02d3\7j\2\2\u02d3\u02d4\5j\66"+ + "\2\u02d4\u02d5\7m\2\2\u02d5\u02df\3\2\2\2\u02d6\u02d7\7k\2\2\u02d7\u02d8"+ + "\5j\66\2\u02d8\u02d9\7m\2\2\u02d9\u02df\3\2\2\2\u02da\u02db\7l\2\2\u02db"+ + "\u02dc\5j\66\2\u02dc\u02dd\7m\2\2\u02dd\u02df\3\2\2\2\u02de\u02c4\3\2"+ + "\2\2\u02de\u02c5\3\2\2\2\u02de\u02c6\3\2\2\2\u02de\u02c7\3\2\2\2\u02de"+ + "\u02c9\3\2\2\2\u02de\u02cd\3\2\2\2\u02de\u02ce\3\2\2\2\u02de\u02d2\3\2"+ + "\2\2\u02de\u02d6\3\2\2\2\u02de\u02da\3\2\2\2\u02dfS\3\2\2\2\u02e0\u02e1"+ + 
"\t\r\2\2\u02e1U\3\2\2\2\u02e2\u02e3\t\16\2\2\u02e3W\3\2\2\2\u02e4\u02e6"+ + "\7\65\2\2\u02e5\u02e7\t\7\2\2\u02e6\u02e5\3\2\2\2\u02e6\u02e7\3\2\2\2"+ + "\u02e7\u02ea\3\2\2\2\u02e8\u02eb\5h\65\2\u02e9\u02eb\5j\66\2\u02ea\u02e8"+ + "\3\2\2\2\u02ea\u02e9\3\2\2\2\u02eb\u02ec\3\2\2\2\u02ec\u02ef\5Z.\2\u02ed"+ + "\u02ee\7\\\2\2\u02ee\u02f0\5Z.\2\u02ef\u02ed\3\2\2\2\u02ef\u02f0\3\2\2"+ + "\2\u02f0Y\3\2\2\2\u02f1\u02f2\t\17\2\2\u02f2[\3\2\2\2\u02f3\u02f4\5`\61"+ + "\2\u02f4]\3\2\2\2\u02f5\u02f6\5`\61\2\u02f6\u02f7\7|\2\2\u02f7\u02f9\3"+ + "\2\2\2\u02f8\u02f5\3\2\2\2\u02f9\u02fc\3\2\2\2\u02fa\u02f8\3\2\2\2\u02fa"+ + "\u02fb\3\2\2\2\u02fb\u02fd\3\2\2\2\u02fc\u02fa\3\2\2\2\u02fd\u02fe\5`"+ + "\61\2\u02fe_\3\2\2\2\u02ff\u0302\5d\63\2\u0300\u0302\5f\64\2\u0301\u02ff"+ + "\3\2\2\2\u0301\u0300\3\2\2\2\u0302a\3\2\2\2\u0303\u0304\5`\61\2\u0304"+ + "\u0305\7\6\2\2\u0305\u0307\3\2\2\2\u0306\u0303\3\2\2\2\u0306\u0307\3\2"+ + "\2\2\u0307\u0308\3\2\2\2\u0308\u0310\7\u0083\2\2\u0309\u030a\5`\61\2\u030a"+ + "\u030b\7\6\2\2\u030b\u030d\3\2\2\2\u030c\u0309\3\2\2\2\u030c\u030d\3\2"+ + "\2\2\u030d\u030e\3\2\2\2\u030e\u0310\5`\61\2\u030f\u0306\3\2\2\2\u030f"+ + "\u030c\3\2\2\2\u0310c\3\2\2\2\u0311\u0314\7\u0084\2\2\u0312\u0314\7\u0085"+ + "\2\2\u0313\u0311\3\2\2\2\u0313\u0312\3\2\2\2\u0314e\3\2\2\2\u0315\u0319"+ + "\7\u0081\2\2\u0316\u0319\5n8\2\u0317\u0319\7\u0082\2\2\u0318\u0315\3\2"+ + "\2\2\u0318\u0316\3\2\2\2\u0318\u0317\3\2\2\2\u0319g\3\2\2\2\u031a\u031d"+ + "\7\u0080\2\2\u031b\u031d\7\177\2\2\u031c\u031a\3\2\2\2\u031c\u031b\3\2"+ + "\2\2\u031di\3\2\2\2\u031e\u031f\t\20\2\2\u031fk\3\2\2\2\u0320\u0321\7"+ + "a\2\2\u0321\u0322\5,\27\2\u0322\u0323\7Z\2\2\u0323\u0324\5,\27\2\u0324"+ + "m\3\2\2\2\u0325\u0326\t\21\2\2\u0326o\3\2\2\2o\177\u0081\u0085\u008e\u0090"+ + "\u0094\u009b\u009f\u00a5\u00aa\u00af\u00b3\u00b8\u00c0\u00c4\u00cc\u00cf"+ + "\u00d5\u00da\u00dd\u00e2\u00e5\u00e7\u00ef\u00f2\u00fe\u0101\u0104\u010b"+ + "\u0112\u0116\u011a\u011e\u0125\u0129\u012d\u0132\u0136\u013e\u0142\u0149"+ + "\u0154\u0157\u015b\u0167\u016a\u0170\u0177\u017e\u0181\u0185\u0189\u018d"+ + "\u018f\u019a\u019f\u01a2\u01a6\u01a9\u01af\u01b2\u01b8\u01bb\u01bd\u01e0"+ + "\u01e8\u01ea\u01f1\u01f6\u01f9\u0201\u020a\u0210\u0218\u021d\u0223\u0226"+ + "\u022d\u0235\u023b\u0247\u0249\u0254\u0263\u0268\u026c\u0270\u0277\u027d"+ + "\u0289\u029e\u02ac\u02b1\u02b8\u02bb\u02c2\u02cb\u02de\u02e6\u02ea\u02ef"+ + "\u02fa\u0301\u0306\u030c\u030f\u0313\u0318\u031c"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java index a90fb751c5e70..01a88ef06fcec 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java @@ -24,16 +24,18 @@ public class EsRelation extends LeafPlan { private final EsIndex index; private final List attrs; + private final boolean frozen; - public EsRelation(Source source, EsIndex index) { + public EsRelation(Source source, EsIndex index, boolean frozen) { super(source); this.index = index; - attrs = flatten(source, index.mapping()); + this.attrs = flatten(source, index.mapping()); + this.frozen = frozen; } @Override protected NodeInfo info() { - return NodeInfo.create(this, EsRelation::new, index); + return NodeInfo.create(this, EsRelation::new, index, frozen); 
} private static List flatten(Source source, Map mapping) { @@ -63,6 +65,10 @@ public EsIndex index() { return index; } + public boolean frozen() { + return frozen; + } + @Override public List output() { return attrs; @@ -75,7 +81,7 @@ public boolean expressionsResolved() { @Override public int hashCode() { - return Objects.hash(index); + return Objects.hash(index, frozen); } @Override @@ -89,7 +95,8 @@ public boolean equals(Object obj) { } EsRelation other = (EsRelation) obj; - return Objects.equals(index, other.index); + return Objects.equals(index, other.index) + && frozen == other.frozen; } private static final int TO_STRING_LIMIT = 52; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java index fa9e2326f2cc5..09ce8023f25ba 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java @@ -20,23 +20,25 @@ public class UnresolvedRelation extends LeafPlan implements Unresolvable { private final TableIdentifier table; + private final boolean frozen; private final String alias; private final String unresolvedMsg; - public UnresolvedRelation(Source source, TableIdentifier table, String alias) { - this(source, table, alias, null); + public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen) { + this(source, table, alias, frozen, null); } - public UnresolvedRelation(Source source, TableIdentifier table, String alias, String unresolvedMessage) { + public UnresolvedRelation(Source source, TableIdentifier table, String alias, boolean frozen, String unresolvedMessage) { super(source); this.table = table; this.alias = alias; + this.frozen = frozen; this.unresolvedMsg = unresolvedMessage == null ? 
"Unknown index [" + table.index() + "]" : unresolvedMessage; } @Override protected NodeInfo info() { - return NodeInfo.create(this, UnresolvedRelation::new, table, alias, unresolvedMsg); + return NodeInfo.create(this, UnresolvedRelation::new, table, alias, frozen, unresolvedMsg); } public TableIdentifier table() { @@ -47,6 +49,10 @@ public String alias() { return alias; } + public boolean frozen() { + return frozen; + } + @Override public boolean resolved() { return false; @@ -86,6 +92,7 @@ public boolean equals(Object obj) { return source().equals(other.source()) && table.equals(other.table) && Objects.equals(alias, other.alias) + && Objects.equals(frozen, other.frozen) && unresolvedMsg.equals(other.unresolvedMsg); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java index ed21b52114064..7cddc3fc0a7e9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java @@ -31,11 +31,13 @@ public class ShowColumns extends Command { private final String index; private final LikePattern pattern; + private final boolean includeFrozen; - public ShowColumns(Source source, String index, LikePattern pattern) { + public ShowColumns(Source source, String index, LikePattern pattern, boolean includeFrozen) { super(source); this.index = index; this.pattern = pattern; + this.includeFrozen = includeFrozen; } public String index() { @@ -48,7 +50,7 @@ public LikePattern pattern() { @Override protected NodeInfo info() { - return NodeInfo.create(this, ShowColumns::new, index, pattern); + return NodeInfo.create(this, ShowColumns::new, index, pattern, includeFrozen); } @Override @@ -62,7 +64,9 @@ public List output() { public void execute(SqlSession session, ActionListener listener) { String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; - session.indexResolver().resolveAsMergedMapping(idx, regex, ActionListener.wrap( + + boolean withFrozen = includeFrozen || session.configuration().includeFrozen(); + session.indexResolver().resolveAsMergedMapping(idx, regex, withFrozen, ActionListener.wrap( indexResult -> { List> rows = emptyList(); if (indexResult.isValid()) { @@ -92,7 +96,7 @@ private void fillInRows(Map mapping, String prefix, List info() { - return NodeInfo.create(this, ShowTables::new, index, pattern); + return NodeInfo.create(this, ShowTables::new, index, pattern, includeFrozen); } public String index() { @@ -46,23 +50,28 @@ public LikePattern pattern() { @Override public List output() { - return asList(keyword("name"), keyword("type")); + return asList(keyword("name"), keyword("type"), keyword("kind")); } @Override public final void execute(SqlSession session, ActionListener listener) { String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; - session.indexResolver().resolveNames(idx, regex, null, ActionListener.wrap(result -> { + + // to avoid redundancy, indicate whether frozen fields are required by specifying the type + EnumSet withFrozen = session.configuration().includeFrozen() || includeFrozen ? 
+ IndexType.VALID_INCLUDE_FROZEN : IndexType.VALID_REGULAR; + + session.indexResolver().resolveNames(idx, regex, withFrozen, ActionListener.wrap(result -> { listener.onResponse(Rows.of(output(), result.stream() - .map(t -> asList(t.name(), t.type().toSql())) + .map(t -> asList(t.name(), t.type().toSql(), t.type().toNative())) .collect(toList()))); }, listener::onFailure)); } @Override public int hashCode() { - return Objects.hash(index, pattern); + return Objects.hash(index, pattern, includeFrozen); } @Override @@ -76,7 +85,8 @@ public boolean equals(Object obj) { } ShowTables other = (ShowTables) obj; - return Objects.equals(index, other.index) - && Objects.equals(pattern, other.pattern); + return Objects.equals(index, other.index) + && Objects.equals(pattern, other.pattern) + && includeFrozen == other.includeFrozen; } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index 674045ab692fa..16d6eae924e36 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -109,16 +109,17 @@ public void execute(SqlSession session, ActionListener listener) { } // save original index name (as the pattern can contain special chars) - String indexName = index != null ? index : (pattern != null ? StringUtils.likeToUnescaped(pattern.pattern(), - pattern.escape()) : ""); + String indexName = index != null ? index : + (pattern != null ? StringUtils.likeToUnescaped(pattern.pattern(), pattern.escape()) : ""); String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; Pattern columnMatcher = columnPattern != null ? 
Pattern.compile(columnPattern.asJavaRegex()) : null; + boolean includeFrozen = session.configuration().includeFrozen(); // special case for '%' (translated to *) if ("*".equals(idx)) { - session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> { + session.indexResolver().resolveAsSeparateMappings(idx, regex, includeFrozen, ActionListener.wrap(esIndices -> { List> rows = new ArrayList<>(); for (EsIndex esIndex : esIndices) { fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode); @@ -129,7 +130,7 @@ public void execute(SqlSession session, ActionListener listener) { } // otherwise use a merged mapping else { - session.indexResolver().resolveAsMergedMapping(idx, regex, ActionListener.wrap(r -> { + session.indexResolver().resolveAsMergedMapping(idx, regex, includeFrozen, ActionListener.wrap(r -> { List> rows = new ArrayList<>(); // populate the data only when a target is found if (r.isValid() == true) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java index 53f1e1019b753..111b392adb6b7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -22,6 +22,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.regex.Pattern; import static java.util.Arrays.asList; @@ -89,6 +90,8 @@ public final void execute(SqlSession session, ActionListener liste } } + boolean includeFrozen = session.configuration().includeFrozen(); + // enumerate types // if no types are specified (the parser takes care of the % case) if (types == null) { @@ -98,7 +101,10 @@ public final void execute(SqlSession session, ActionListener liste && pattern != null && pattern.pattern().isEmpty() && index == null) { List> values = new ArrayList<>(); // send only the types, everything else is made of empty strings - for (IndexType type : IndexType.VALID) { + // NB: since the types are sent in SQL, frozen doesn't have to be taken into account since + // it's just another BASE TABLE + Set typeSet = IndexType.VALID_REGULAR; + for (IndexType type : typeSet) { Object[] enumeration = new Object[10]; enumeration[3] = type.toSql(); values.add(asList(enumeration)); @@ -110,6 +116,7 @@ public final void execute(SqlSession session, ActionListener liste } } + // no enumeration pattern found, list actual tables String cRegex = clusterPattern != null ? clusterPattern.asJavaRegex() : null; @@ -122,7 +129,18 @@ public final void execute(SqlSession session, ActionListener liste String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; - session.indexResolver().resolveNames(idx, regex, types, ActionListener.wrap(result -> listener.onResponse( + EnumSet tableTypes = types; + + // initialize types for name resolution + if (tableTypes == null) { + tableTypes = includeFrozen ? 
IndexType.VALID_INCLUDE_FROZEN : IndexType.VALID_REGULAR; + } else { + if (includeFrozen && tableTypes.contains(IndexType.FROZEN_INDEX) == false) { + tableTypes.add(IndexType.FROZEN_INDEX); + } + } + + session.indexResolver().resolveNames(idx, regex, tableTypes, ActionListener.wrap(result -> listener.onResponse( Rows.of(output(), result.stream() // sort by type (which might be legacy), then by name .sorted(Comparator. comparing(i -> legacyName(i.type())) @@ -142,7 +160,7 @@ public final void execute(SqlSession session, ActionListener liste } private String legacyName(IndexType indexType) { - return legacyTableTypes && indexType == IndexType.INDEX ? "TABLE" : indexType.toSql(); + return legacyTableTypes && IndexType.INDICES_ONLY.contains(indexType) ? IndexType.SQL_TABLE : indexType.toSql(); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java index 92a60b4ee5576..b32ad961ae958 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java @@ -11,10 +11,10 @@ import org.elasticsearch.xpack.sql.plan.logical.Filter; import org.elasticsearch.xpack.sql.plan.logical.Join; import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.OrderBy; import org.elasticsearch.xpack.sql.plan.logical.Project; -import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; import org.elasticsearch.xpack.sql.plan.logical.With; import org.elasticsearch.xpack.sql.plan.logical.command.Command; import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; @@ -22,10 +22,10 @@ import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.sql.plan.physical.FilterExec; import org.elasticsearch.xpack.sql.plan.physical.LimitExec; +import org.elasticsearch.xpack.sql.plan.physical.LocalExec; import org.elasticsearch.xpack.sql.plan.physical.OrderExec; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.plan.physical.ProjectExec; -import org.elasticsearch.xpack.sql.plan.physical.LocalExec; import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec; import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; import org.elasticsearch.xpack.sql.rule.Rule; @@ -91,7 +91,11 @@ protected PhysicalPlan map(LogicalPlan p) { if (p instanceof EsRelation) { EsRelation c = (EsRelation) p; List output = c.output(); - return new EsQueryExec(p.source(), c.index().name(), output, new QueryContainer()); + QueryContainer container = new QueryContainer(); + if (c.frozen()) { + container = container.withFrozen(); + } + return new EsQueryExec(p.source(), c.index().name(), output, container); } if (p instanceof Limit) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index 56554185ce84b..802d6d37b7c40 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -149,7 +149,8 @@ protected PhysicalPlan rule(ProjectExec project) { new AttributeMap<>(processors), queryC.sort(), 
queryC.limit(), - queryC.shouldTrackHits()); + queryC.shouldTrackHits(), + queryC.shouldIncludeFrozen()); return new EsQueryExec(exec.source(), exec.index(), project.output(), clone); } return project; @@ -178,7 +179,8 @@ protected PhysicalPlan rule(FilterExec plan) { qContainer.scalarFunctions(), qContainer.sort(), qContainer.limit(), - qContainer.shouldTrackHits()); + qContainer.shouldTrackHits(), + qContainer.shouldIncludeFrozen()); return exec.with(qContainer); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index 6ba8219c2b580..771ab30d0f4c1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -137,5 +137,4 @@ public RestResponse buildResponse(SqlQueryResponse response) throws Exception { public String getName() { return "sql_query"; } - } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java index d0c67f193b710..98bb25b8ebd81 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java @@ -47,7 +47,8 @@ public static void operation(PlanExecutor planExecutor, SqlClearCursorRequest re Cursor cursor = Cursors.decodeFromString(request.getCursor()); planExecutor.cleanCursor( new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, - request.mode(), StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY, Protocol.FIELD_MULTI_VALUE_LENIENCY), + request.mode(), StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY, Protocol.FIELD_MULTI_VALUE_LENIENCY, + Protocol.INDEX_INCLUDE_FROZEN), cursor, ActionListener.wrap( success -> listener.onResponse(new SqlClearCursorResponse(success)), listener::onFailure)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 44695f950224c..97fc583c3e886 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -72,7 +72,8 @@ public static void operation(PlanExecutor planExecutor, SqlQueryRequest request, // The configuration is always created however when dealing with the next page, only the timeouts are relevant // the rest having default values (since the query is already created) Configuration cfg = new Configuration(request.zoneId(), request.fetchSize(), request.requestTimeout(), request.pageTimeout(), - request.filter(), request.mode(), request.clientId(), username, clusterName, request.fieldMultiValueLeniency()); + request.filter(), request.mode(), request.clientId(), username, clusterName, request.fieldMultiValueLeniency(), + request.indexIncludeFrozen()); if (Strings.hasText(request.cursor()) == false) { planExecutor.sql(cfg, request.query(), request.params(), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java index 0b9132df29025..9101557d1cd1a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java @@ -56,7 +56,8 @@ protected void doExecute(Task task, SqlTranslateRequest request, ActionListener< Configuration cfg = new Configuration(request.zoneId(), request.fetchSize(), request.requestTimeout(), request.pageTimeout(), request.filter(), request.mode(), request.clientId(), - username(securityContext), clusterName(clusterService), Protocol.FIELD_MULTI_VALUE_LENIENCY); + username(securityContext), clusterName(clusterService), Protocol.FIELD_MULTI_VALUE_LENIENCY, + Protocol.INDEX_INCLUDE_FROZEN); planExecutor.searchSource(cfg, request.query(), request.params(), ActionListener.wrap( searchSourceBuilder -> listener.onResponse(new SqlTranslateResponse(searchSourceBuilder)), listener::onFailure)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java index 329eb9b566a05..827eade2e5924 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java @@ -81,25 +81,27 @@ public class QueryContainer { private final Set sort; private final int limit; private final boolean trackHits; + private final boolean includeFrozen; // computed private Boolean aggsOnly; private Boolean customSort; public QueryContainer() { - this(null, null, null, null, null, null, null, -1, false); + this(null, null, null, null, null, null, null, -1, false, false); } - public QueryContainer(Query query, - Aggs aggs, - List> fields, + public QueryContainer(Query query, + Aggs aggs, + List> fields, AttributeMap aliases, - Map pseudoFunctions, - AttributeMap scalarFunctions, - Set sort, - int limit, - boolean trackHits) { + Map pseudoFunctions, + AttributeMap scalarFunctions, + Set sort, + int limit, + boolean trackHits, + boolean includeFrozen) { this.query = query; this.aggs = aggs == null ? Aggs.EMPTY : aggs; this.fields = fields == null || fields.isEmpty() ? emptyList() : fields; @@ -109,6 +111,7 @@ public QueryContainer(Query query, this.sort = sort == null || sort.isEmpty() ? 
emptySet() : sort; this.limit = limit; this.trackHits = trackHits; + this.includeFrozen = includeFrozen; } /** @@ -237,42 +240,53 @@ public boolean shouldTrackHits() { return trackHits; } + public boolean shouldIncludeFrozen() { + return includeFrozen; + } + // // copy methods // public QueryContainer with(Query q) { - return new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits); + return new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen); } public QueryContainer withAliases(AttributeMap a) { - return new QueryContainer(query, aggs, fields, a, pseudoFunctions, scalarFunctions, sort, limit, trackHits); + return new QueryContainer(query, aggs, fields, a, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen); } public QueryContainer withPseudoFunctions(Map p) { - return new QueryContainer(query, aggs, fields, aliases, p, scalarFunctions, sort, limit, trackHits); + return new QueryContainer(query, aggs, fields, aliases, p, scalarFunctions, sort, limit, trackHits, includeFrozen); } public QueryContainer with(Aggs a) { - return new QueryContainer(query, a, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits); + return new QueryContainer(query, a, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen); } public QueryContainer withLimit(int l) { - return l == limit ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, l, trackHits); + return l == limit ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, l, trackHits, + includeFrozen); } public QueryContainer withTrackHits() { - return trackHits ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, true); + return trackHits ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, true, + includeFrozen); + } + + public QueryContainer withFrozen() { + return includeFrozen ? 
this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, + trackHits, true); } public QueryContainer withScalarProcessors(AttributeMap procs) { - return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, procs, sort, limit, trackHits); + return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, procs, sort, limit, trackHits, includeFrozen); } public QueryContainer addSort(Sort sortable) { Set sort = new LinkedHashSet<>(this.sort); sort.add(sortable); - return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits); + return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen); } private String aliasName(Attribute attr) { @@ -294,7 +308,8 @@ private Tuple nestedHitFieldRef(FieldAttribute SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().getDataType(), attr.field().isAggregatable(), attr.parent().name()); - return new Tuple<>(new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits), + return new Tuple<>( + new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits, includeFrozen), nestedFieldRef); } @@ -397,7 +412,7 @@ public QueryContainer addColumn(FieldExtraction ref, Attribute attr) { ExpressionId id = attr instanceof AggregateFunctionAttribute ? ((AggregateFunctionAttribute) attr).innerId() : attr.id(); return new QueryContainer(query, aggs, combine(fields, new Tuple<>(ref, id)), aliases, pseudoFunctions, scalarFunctions, - sort, limit, trackHits); + sort, limit, trackHits, includeFrozen); } public AttributeMap scalarFunctions() { @@ -430,7 +445,7 @@ public QueryContainer updateGroup(GroupByKey group) { @Override public int hashCode() { - return Objects.hash(query, aggs, fields, aliases, sort, limit); + return Objects.hash(query, aggs, fields, aliases, sort, limit, trackHits, includeFrozen); } @Override @@ -449,7 +464,9 @@ public boolean equals(Object obj) { && Objects.equals(fields, other.fields) && Objects.equals(aliases, other.aliases) && Objects.equals(sort, other.sort) - && Objects.equals(limit, other.limit); + && Objects.equals(limit, other.limit) + && Objects.equals(trackHits, other.trackHits) + && Objects.equals(includeFrozen, other.includeFrozen); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java index d03ac08305e3e..5b9901ccae447 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java @@ -25,6 +25,7 @@ public class Configuration { private final String clusterName; private final boolean multiValueFieldLeniency; private final ZonedDateTime now; + private final boolean includeFrozenIndices; @Nullable private QueryBuilder filter; @@ -32,7 +33,8 @@ public class Configuration { public Configuration(ZoneId zi, int pageSize, TimeValue requestTimeout, TimeValue pageTimeout, QueryBuilder filter, Mode mode, String clientId, String username, String clusterName, - boolean multiValueFieldLeniency) { + boolean multiValueFieldLeniency, + boolean includeFrozen) { this.zoneId = zi.normalized(); this.pageSize = pageSize; this.requestTimeout = requestTimeout; @@ -44,6 +46,7 @@ public Configuration(ZoneId zi, 
int pageSize, TimeValue requestTimeout, TimeValu this.clusterName = clusterName; this.multiValueFieldLeniency = multiValueFieldLeniency; this.now = ZonedDateTime.now(zoneId); + this.includeFrozenIndices = includeFrozen; } public ZoneId zoneId() { @@ -80,7 +83,7 @@ public String username() { public String clusterName() { return clusterName; } - + public ZonedDateTime now() { return now; } @@ -88,4 +91,8 @@ public ZonedDateTime now() { public boolean multiValueFieldLeniency() { return multiValueFieldLeniency; } + + public boolean includeFrozen() { + return includeFrozenIndices; + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java index ae1a6f14da522..6a5b5bd2ae5fd 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -11,6 +11,7 @@ import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer; import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer.PreAnalysis; +import org.elasticsearch.xpack.sql.analysis.analyzer.TableInfo; import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; @@ -128,7 +129,8 @@ private void preAnalyze(LogicalPlan parsed, Function act // Note: JOINs are not supported but we detect them when listener.onFailure(new MappingException("Queries with multiple indices are not supported")); } else if (preAnalysis.indices.size() == 1) { - TableIdentifier table = preAnalysis.indices.get(0); + TableInfo tableInfo = preAnalysis.indices.get(0); + TableIdentifier table = tableInfo.id(); String cluster = table.cluster(); @@ -136,7 +138,8 @@ private void preAnalyze(LogicalPlan parsed, Function act listener.onFailure(new MappingException("Cannot inspect indices in cluster/catalog [{}]", cluster)); } - indexResolver.resolveAsMergedMapping(table.index(), null, + boolean includeFrozen = configuration.includeFrozen() || tableInfo.isFrozen(); + indexResolver.resolveAsMergedMapping(table.index(), null, includeFrozen, wrap(indexResult -> listener.onResponse(action.apply(indexResult)), listener::onFailure)); } else { try { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java index afec8063edab0..edce320c5b484 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java @@ -15,6 +15,7 @@ import java.time.ZoneId; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.elasticsearch.test.ESTestCase.randomNonNegativeLong; @@ -27,7 +28,7 @@ private TestUtils() {} public static final Configuration TEST_CFG = new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, - null, null, null, false); + null, null, null, false, false); public static Configuration randomConfiguration() { return new 
Configuration(randomZone(), @@ -39,7 +40,8 @@ public static Configuration randomConfiguration() { randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), - false); + false, + randomBoolean()); } public static Configuration randomConfiguration(ZoneId providedZoneId) { @@ -52,7 +54,8 @@ public static Configuration randomConfiguration(ZoneId providedZoneId) { randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), - false); + false, + randomBoolean()); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java index f37378c8fa994..ca3b2f14ebc80 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java @@ -24,8 +24,8 @@ public void testBasicIndex() { PreAnalysis result = preAnalyzer.preAnalyze(plan); assertThat(plan.preAnalyzed(), is(true)); assertThat(result.indices, hasSize(1)); - assertThat(result.indices.get(0).cluster(), nullValue()); - assertThat(result.indices.get(0).index(), is("index")); + assertThat(result.indices.get(0).id().cluster(), nullValue()); + assertThat(result.indices.get(0).id().index(), is("index")); } public void testBasicIndexWithCatalog() { @@ -33,8 +33,8 @@ public void testBasicIndexWithCatalog() { PreAnalysis result = preAnalyzer.preAnalyze(plan); assertThat(plan.preAnalyzed(), is(true)); assertThat(result.indices, hasSize(1)); - assertThat(result.indices.get(0).cluster(), is("elastic")); - assertThat(result.indices.get(0).index(), is("index")); + assertThat(result.indices.get(0).id().cluster(), is("elastic")); + assertThat(result.indices.get(0).id().index(), is("index")); } public void testWildIndexWithCatalog() { @@ -42,8 +42,8 @@ public void testWildIndexWithCatalog() { PreAnalysis result = preAnalyzer.preAnalyze(plan); assertThat(plan.preAnalyzed(), is(true)); assertThat(result.indices, hasSize(1)); - assertThat(result.indices.get(0).cluster(), is("elastic")); - assertThat(result.indices.get(0).index(), is("index*")); + assertThat(result.indices.get(0).id().cluster(), is("elastic")); + assertThat(result.indices.get(0).id().index(), is("index*")); } public void testQuotedIndex() { @@ -51,8 +51,8 @@ public void testQuotedIndex() { PreAnalysis result = preAnalyzer.preAnalyze(plan); assertThat(plan.preAnalyzed(), is(true)); assertThat(result.indices, hasSize(1)); - assertThat(result.indices.get(0).cluster(), nullValue()); - assertThat(result.indices.get(0).index(), is("aaa")); + assertThat(result.indices.get(0).id().cluster(), nullValue()); + assertThat(result.indices.get(0).id().index(), is("aaa")); } public void testQuotedCatalog() { @@ -60,8 +60,8 @@ public void testQuotedCatalog() { PreAnalysis result = preAnalyzer.preAnalyze(plan); assertThat(plan.preAnalyzed(), is(true)); assertThat(result.indices, hasSize(1)); - assertThat(result.indices.get(0).cluster(), is("elastic")); - assertThat(result.indices.get(0).index(), is("aaa")); + assertThat(result.indices.get(0).id().cluster(), is("elastic")); + assertThat(result.indices.get(0).id().index(), is("aaa")); } public void testComplicatedQuery() { @@ -69,7 +69,7 @@ public void testComplicatedQuery() { PreAnalysis result = preAnalyzer.preAnalyze(plan); assertThat(plan.preAnalyzed(), is(true)); assertThat(result.indices, hasSize(1)); - 
assertThat(result.indices.get(0).cluster(), nullValue()); - assertThat(result.indices.get(0).index(), is("aaa")); + assertThat(result.indices.get(0).id().cluster(), nullValue()); + assertThat(result.indices.get(0).id().index(), is("aaa")); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java index f2dccc396dbd3..811a8ff4256f2 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java @@ -30,7 +30,7 @@ public static CompositeAggregationCursor randomCompositeCursor() { } return new CompositeAggregationCursor(new byte[randomInt(256)], extractors, randomBitSet(extractorsSize), - randomIntBetween(10, 1024), randomAlphaOfLength(5)); + randomIntBetween(10, 1024), randomBoolean(), randomAlphaOfLength(5)); } static BucketExtractor randomBucketExtractor() { @@ -46,7 +46,7 @@ protected CompositeAggregationCursor mutateInstance(CompositeAggregationCursor i return new CompositeAggregationCursor(instance.next(), instance.extractors(), randomValueOtherThan(instance.mask(), () -> randomBitSet(instance.extractors().size())), randomValueOtherThan(instance.limit(), () -> randomIntBetween(1, 512)), - instance.indices()); + randomBoolean(), instance.indices()); } @Override diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java index 6581781c70072..0156d8fdfb59a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java @@ -29,9 +29,9 @@ public void testDatabaseFunctionOutput() { EsIndex test = new EsIndex("test", TypesTests.loadMapping("mapping-basic.json", true)); Analyzer analyzer = new Analyzer( new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, - Protocol.PAGE_TIMEOUT, null, + Protocol.PAGE_TIMEOUT, null, randomFrom(Mode.values()), randomAlphaOfLength(10), - null, clusterName, randomBoolean()), + null, clusterName, randomBoolean(), randomBoolean()), new FunctionRegistry(), IndexResolution.valid(test), new Verifier(new Metrics()) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java index 190bc273d7a5e..f8b3ed1976450 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java @@ -30,7 +30,8 @@ public void testNoUsernameFunctionOutput() { new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, randomFrom(Mode.values()), randomAlphaOfLength(10), - null, randomAlphaOfLengthBetween(1, 15), randomBoolean()), + null, randomAlphaOfLengthBetween(1, 15), + randomBoolean(), randomBoolean()), new FunctionRegistry(), 
IndexResolution.valid(test), new Verifier(new Metrics()) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index b2e5eebe5ea5f..cf6530e2188ff 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -195,7 +195,7 @@ private static FieldAttribute getFieldAttribute(String name) { } public void testPruneSubqueryAliases() { - ShowTables s = new ShowTables(EMPTY, null, null); + ShowTables s = new ShowTables(EMPTY, null, null, false); SubQueryAlias plan = new SubQueryAlias(EMPTY, s, "show"); LogicalPlan result = new PruneSubqueryAliases().apply(plan); assertEquals(result, s); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java index f8cfd179fd0f2..8dda75b889576 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java @@ -7,8 +7,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.plan.TableIdentifier; -import org.elasticsearch.xpack.sql.tree.SourceTests; import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.tree.SourceTests; import java.util.ArrayList; import java.util.List; @@ -23,17 +23,19 @@ public void testEqualsAndHashCode() { TableIdentifier table = new TableIdentifier(source, randomAlphaOfLength(5), randomAlphaOfLength(5)); String alias = randomBoolean() ? null : randomAlphaOfLength(5); String unresolvedMessage = randomAlphaOfLength(5); - UnresolvedRelation relation = new UnresolvedRelation(source, table, alias, unresolvedMessage); + UnresolvedRelation relation = new UnresolvedRelation(source, table, alias, randomBoolean(), unresolvedMessage); List> mutators = new ArrayList<>(); mutators.add(r -> new UnresolvedRelation( SourceTests.mutate(r.source()), r.table(), r.alias(), + r.frozen(), r.unresolvedMessage())); mutators.add(r -> new UnresolvedRelation( r.source(), new TableIdentifier(r.source(), r.table().cluster(), r.table().index() + "m"), r.alias(), + r.frozen(), r.unresolvedMessage())); mutators.add(r -> new UnresolvedRelation( r.source(), @@ -41,14 +43,16 @@ public void testEqualsAndHashCode() { randomValueOtherThanMany( a -> Objects.equals(a, r.alias()), () -> randomBoolean() ? 
null : randomAlphaOfLength(5)), + r.frozen(), r.unresolvedMessage())); mutators.add(r -> new UnresolvedRelation( r.source(), r.table(), r.alias(), + r.frozen(), randomValueOtherThan(r.unresolvedMessage(), () -> randomAlphaOfLength(5)))); checkEqualsAndHashCode(relation, - r -> new UnresolvedRelation(r.source(), r.table(), r.alias(), r.unresolvedMessage()), + r -> new UnresolvedRelation(r.source(), r.table(), r.alias(), r.frozen(), r.unresolvedMessage()), r -> randomFrom(mutators).apply(r)); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index bb4fb02ea7e85..f3f2d9569c53f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -37,6 +37,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.action.ActionListener.wrap; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -47,7 +48,7 @@ public class SysColumnsTests extends ESTestCase { private final SqlParser parser = new SqlParser(); private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); - private final IndexInfo index = new IndexInfo("test_emp", IndexType.INDEX); + private final IndexInfo index = new IndexInfo("test_emp", IndexType.STANDARD_INDEX); private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); @@ -509,9 +510,9 @@ private void executeCommand(String sql, List params, Consume EsIndex test = new EsIndex("test", mapping); doAnswer(invocation -> { - ((ActionListener) invocation.getArguments()[2]).onResponse(IndexResolution.valid(test)); + ((ActionListener) invocation.getArguments()[3]).onResponse(IndexResolution.valid(test)); return Void.TYPE; - }).when(resolver).resolveAsMergedMapping(any(), any(), any()); + }).when(resolver).resolveAsMergedMapping(any(), any(), anyBoolean(), any()); tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index d7a24681329cb..be32e8e81f9b1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -48,8 +48,9 @@ public class SysTablesTests extends ESTestCase { private final SqlParser parser = new SqlParser(); private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); - private final IndexInfo index = new IndexInfo("test", IndexType.INDEX); + private final IndexInfo index = new IndexInfo("test", IndexType.STANDARD_INDEX); private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); + private final IndexInfo frozen = new IndexInfo("frozen", IndexType.FROZEN_INDEX); // // catalog enumeration @@ -107,7 +108,7 @@ public void testSysTablesTypesEnumeration() throws Exception { executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE 
'%'", r -> { assertEquals(2, r.size()); - Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); + Iterator it = IndexType.VALID_REGULAR.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); for (int t = 0; t < r.size(); t++) { assertEquals(it.next().toSql(), r.column(3)); @@ -171,6 +172,20 @@ public void testSysTablesWithProperTypes() throws Exception { }, index, alias); } + public void testSysTablesWithProperTypesAndFrozen() throws Exception { + executeCommand("SYS TABLES TYPE 'BASE TABLE', 'ALIAS'", r -> { + assertEquals(3, r.size()); + assertEquals("frozen", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, frozen, alias); + } + public void testSysTablesPattern() throws Exception { executeCommand("SYS TABLES LIKE '%'", r -> { assertEquals(2, r.size()); @@ -213,6 +228,15 @@ public void testSysTablesOnlyIndices() throws Exception { }, index); } + public void testSysTablesOnlyIndicesWithFrozen() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'BASE TABLE'", r -> { + assertEquals(2, r.size()); + assertEquals("frozen", r.column(2)); + assertTrue(r.advanceRow()); + assertEquals("test", r.column(2)); + }, index, frozen); + } + public void testSysTablesOnlyIndicesInLegacyMode() throws Exception { executeCommand("SYS TABLES LIKE 'test' TYPE 'TABLE'", r -> { assertEquals(1, r.size()); From a3b1e5f6d74cf682662e765507bdd9bb6aa9f49d Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 9 May 2019 17:38:25 -0400 Subject: [PATCH 060/321] Fix max_bucket test by disallowing partial results (#41959) The Max Bucket test can potentially return a partial response, where one of the shards suceeds but another fails due to the max_bucket setting. In the case of a partial failure, the status code is 200 OK since some results were returned (with failures listed in the body). This makes the yaml test fail since it is expecting a 4xx/5xx failure when catching exception messages. We need to disallow partial results so that the entire query fails and we can check for the max_bucket failure. 
---
 .../test/search.aggregation/240_max_buckets.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml
index 7761eb0f95155..9adf6343fbf95 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml
@@ -75,9 +75,7 @@ setup:

 ---
 "Max bucket":
-  - skip:
-      version: "all"
-      reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/41947"
+
   - do:
       cluster.put_settings:
         body:
@@ -88,6 +86,7 @@ setup:
       catch: /.*Trying to create too many buckets.*/
       search:
         rest_total_hits_as_int: true
+        allow_partial_search_results: false
        index: test
        body:
          aggregations:
@@ -105,6 +104,7 @@ setup:
       catch: /.*Trying to create too many buckets.*/
       search:
         rest_total_hits_as_int: true
+        allow_partial_search_results: false
        index: test
        body:
          aggregations:

From 0f26c728b473fda7bd44b0de870c99d93997da60 Mon Sep 17 00:00:00 2001
From: Christian Mesh
Date: Thu, 9 May 2019 18:14:23 -0400
Subject: [PATCH 061/321] Add painless string split function (splitOnToken) (#39772)

Adds two String split functions to Painless that can be used without
enabling regexes.
---
 .../packages.asciidoc                        |  2 +
 .../painless/api/Augmentation.java           | 49 +++++++++++++++++++
 .../elasticsearch/painless/spi/java.lang.txt |  2 +
 .../painless/AugmentationTests.java          | 40 +++++++++++++++
 4 files changed, 93 insertions(+)

diff --git a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc
index f692141051200..75ad21ddc93f2 100644
--- a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc
+++ b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc
@@ -1253,6 +1253,8 @@ See the <> for a high-level overview
 * String {java11-javadoc}/java.base/java/lang/String.html#replace(java.lang.CharSequence,java.lang.CharSequence)[replace](CharSequence, CharSequence)
 * String replaceAll(Pattern, Function)
 * String replaceFirst(Pattern, Function)
+* String[] splitOnToken(String)
+* String[] splitOnToken(String, int)
 * boolean {java11-javadoc}/java.base/java/lang/String.html#startsWith(java.lang.String)[startsWith](String)
 * boolean {java11-javadoc}/java.base/java/lang/String.html#startsWith(java.lang.String,int)[startsWith](String, int)
 * CharSequence {java11-javadoc}/java.base/java/lang/CharSequence.html#subSequence(int,int)[subSequence](int, int)
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java
index 0b751b7d2f78f..bbbbc3dfc37cf 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java
@@ -503,4 +503,53 @@ public static String encodeBase64(String receiver) {
     public static String decodeBase64(String receiver) {
         return new String(Base64.getDecoder().decode(receiver.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
     }
+
+    /**
+     * Split 'receiver' by 'token' as many times as possible.
+     */
+    public static String[] splitOnToken(String receiver, String token) {
+        return splitOnToken(receiver, token, -1);
+    }
+
+    /**
+     * Split 'receiver' by 'token' up to 'limit' times. Any limit less than 1 is ignored.
+     */
+    public static String[] splitOnToken(String receiver, String token, int limit) {
+        // Check if it's even possible to perform a split
+        if (receiver == null || receiver.length() == 0 || token == null || token.length() == 0 || receiver.length() < token.length()) {
+            return new String[] { receiver };
+        }
+
+        // List of string segments we have found
+        ArrayList<String> result = new ArrayList<String>();
+
+        // Keep track of where we are in the string
+        // indexOf(tok, startPos) is faster than creating a new search context every loop with substring(start, end)
+        int pos = 0;
+
+        // Loop until we hit the limit, or forever if we are passed in less than one (signifying no limit)
+        // If Integer.MIN_VALUE is passed in, it will still continue to loop down to 1 from MAX_VALUE
+        // This edge case should be fine as we are limited by receiver length (Integer.MAX_VALUE) even if we split at every char
+        for (; limit != 1; limit--) {
+
+            // Find the next occurrence of token after current pos
+            int idx = receiver.indexOf(token, pos);
+
+            // Reached the end of the string without another match
+            if (idx == -1) {
+                break;
+            }
+
+            // Add the found segment to the result list
+            result.add(receiver.substring(pos, idx));
+
+            // Move our search position to the next possible location
+            pos = idx + token.length();
+        }
+        // Add the remaining string to the result list
+        result.add(receiver.substring(pos));
+
+        // O(N) or faster depending on implementation
+        return result.toArray(new String[0]);
+    }
 }
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt
index ef2d462127f36..63ed6d41c676d 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt
@@ -758,6 +758,8 @@ class java.lang.String {
   String copyValueOf(char[],int,int)
   String org.elasticsearch.painless.api.Augmentation decodeBase64()
   String org.elasticsearch.painless.api.Augmentation encodeBase64()
+  String[] org.elasticsearch.painless.api.Augmentation splitOnToken(String)
+  String[] org.elasticsearch.painless.api.Augmentation splitOnToken(String, int)
   boolean endsWith(String)
   boolean equalsIgnoreCase(String)
   String format(Locale,String,def[])
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
index a0d1c5a58917e..70fbb733e2f8f 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
@@ -23,6 +23,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.regex.Pattern;

 public class AugmentationTests extends ScriptTestCase {

@@ -199,4 +200,43 @@ public void testFeatureTest() {
         assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" +
                 " ft.setX(3); ft.setY(2); return ft.addToTotal(3)"));
     }
+
+    private static class SplitCase {
+        final String input;
+        final String token;
+        final int count;
+
+        SplitCase(String input, String token, int count) {
+            this.input = input;
this.token = token; + this.count = count; + } + SplitCase(String input, String token) { + this(input, token, -1); + } + } + public void testString_SplitOnToken() { + SplitCase[] cases = new SplitCase[] { + new SplitCase("", ""), + new SplitCase("a,b,c", ","), + new SplitCase("a,b,c", "|"), + new SplitCase("a,,b,,c", ","), + new SplitCase("a,,b,,c", ",", 1), + new SplitCase("a,,b,,c", ",", 3), + new SplitCase("a,,b,,c", ",", 300), + new SplitCase("a,b,c", "a,b,c,d"), + new SplitCase("aaaaaaa", "a"), + new SplitCase("aaaaaaa", "a", 2), + new SplitCase("1.1.1.1.111", "1"), + new SplitCase("1.1.1.1.111", "."), + new SplitCase("1\n1.1.\r\n1\r\n111", "\r\n"), + }; + for (SplitCase split : cases) { + //System.out.println(String.format("Splitting '%s' by '%s' %d times", split.input, split.token, split.count)); + assertArrayEquals( + split.input.split(Pattern.quote(split.token), split.count), + (String[])exec("return \""+split.input+"\".splitOnToken(\""+split.token+"\", "+split.count+");") + ); + } + } } From 47115b65e907ae8893784077f914b81059960726 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Fri, 10 May 2019 07:02:34 +0200 Subject: [PATCH 062/321] Implement factory methods for ValidationException (#41993) Implement factory methods for ValidationException to make the client code more concise (1 LOC vs 3 LOC for a single error scenario) --- .../client/ValidationException.java | 24 ++++++++ .../client/ValidationExceptionTests.java | 56 +++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ValidationExceptionTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java index 730ea7e95de12..6d7d801bcb4d2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java @@ -21,12 +21,36 @@ import org.elasticsearch.common.Nullable; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; /** * Encapsulates an accumulation of validation errors */ public class ValidationException extends IllegalArgumentException { + + /** + * Creates {@link ValidationException} instance initialized with given error messages. + * @param error the errors to add + * @return {@link ValidationException} instance + */ + public static ValidationException withError(String... error) { + return withErrors(Arrays.asList(error)); + } + + /** + * Creates {@link ValidationException} instance initialized with given error messages. + * @param errors the list of errors to add + * @return {@link ValidationException} instance + */ + public static ValidationException withErrors(List errors) { + ValidationException e = new ValidationException(); + for (String error : errors) { + e.addValidationError(error); + } + return e; + } + private final List validationErrors = new ArrayList<>(); /** diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ValidationExceptionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ValidationExceptionTests.java new file mode 100644 index 0000000000000..cde360b029839 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ValidationExceptionTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasSize; + +public class ValidationExceptionTests extends ESTestCase { + + private static final String ERROR = "some-error"; + private static final String OTHER_ERROR = "some-other-error"; + + public void testWithError() { + ValidationException e = ValidationException.withError(ERROR, OTHER_ERROR); + assertThat(e.validationErrors(), hasSize(2)); + assertThat(e.validationErrors(), contains(ERROR, OTHER_ERROR)); + } + + public void testWithErrors() { + ValidationException e = ValidationException.withErrors(Arrays.asList(ERROR, OTHER_ERROR)); + assertThat(e.validationErrors(), hasSize(2)); + assertThat(e.validationErrors(), contains(ERROR, OTHER_ERROR)); + } + + public void testAddValidationError() { + ValidationException e = new ValidationException(); + assertThat(e.validationErrors(), hasSize(0)); + e.addValidationError(ERROR); + assertThat(e.validationErrors(), hasSize(1)); + assertThat(e.validationErrors(), contains(ERROR)); + e.addValidationError(OTHER_ERROR); + assertThat(e.validationErrors(), hasSize(2)); + assertThat(e.validationErrors(), contains(ERROR, OTHER_ERROR)); + } +} From c80da0429c886fee92ddbc0b793a30e12638f652 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 9 May 2019 22:12:45 -0700 Subject: [PATCH 063/321] Improve jdk download tests (#42034) This commit reworks the tests for jdk download to test the old and new url pattern from oracle. Additionally it limits to one repository created per version, based on the old or new pattern, and restricts other repositories from trying to resolve jdks. 
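Concretely (with placeholder values, derived from the patterns in this change), the legacy layout resolves artifacts at /java/GA/jdk<major>/<build>/GPL/openjdk-<version>_<platform>-x64_bin.<ext>, while the 12.0.1+ layout inserts a hash segment: /java/GA/jdk<version>/<hash>/<build>/GPL/openjdk-<version>_<platform>-x64_bin.<ext>. The tests exercise both layouts against a local WireMock server.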
closes #41998 --- .../gradle/JdkDownloadPlugin.java | 52 +++++++++++------- .../gradle/JdkDownloadPluginIT.java | 47 ++++++++++------ ...4_bin.tar.gz => fake_openjdk_linux.tar.gz} | Bin ...x64_bin.tar.gz => fake_openjdk_osx.tar.gz} | Bin ...s-x64_bin.zip => fake_openjdk_windows.zip} | Bin 5 files changed, 64 insertions(+), 35 deletions(-) rename buildSrc/src/test/resources/org/elasticsearch/gradle/{openjdk-1.0.2_linux-x64_bin.tar.gz => fake_openjdk_linux.tar.gz} (100%) rename buildSrc/src/test/resources/org/elasticsearch/gradle/{openjdk-1.0.2_osx-x64_bin.tar.gz => fake_openjdk_osx.tar.gz} (100%) rename buildSrc/src/test/resources/org/elasticsearch/gradle/{openjdk-1.0.2_windows-x64_bin.zip => fake_openjdk_windows.zip} (100%) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index a6372dfd231ac..a408b66ec817d 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -27,6 +27,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ConfigurationContainer; import org.gradle.api.artifacts.dsl.DependencyHandler; +import org.gradle.api.artifacts.dsl.RepositoryHandler; import org.gradle.api.artifacts.repositories.IvyArtifactRepository; import org.gradle.api.file.CopySpec; import org.gradle.api.file.FileTree; @@ -45,6 +46,8 @@ public class JdkDownloadPlugin implements Plugin { + private static final String REPO_NAME_PREFIX = "jdk_repo_"; + @Override public void apply(Project project) { NamedDomainObjectContainer jdksContainer = project.container(Jdk.class, name -> @@ -69,6 +72,13 @@ public void apply(Project project) { setupRootJdkDownload(project.getRootProject(), platform, version); } }); + + // all other repos should ignore the special jdk artifacts + project.getRootProject().getRepositories().all(repo -> { + if (repo.getName().startsWith(REPO_NAME_PREFIX) == false) { + repo.content(content -> content.excludeGroup("jdk")); + } + }); } private static void setupRootJdkDownload(Project rootProject, String platform, String version) { @@ -94,26 +104,30 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S String hash = jdkVersionMatcher.group(5); // add fake ivy repo for jdk url - String repoName = "jdk_repo_" + version; + String repoName = REPO_NAME_PREFIX + version; + RepositoryHandler repositories = rootProject.getRepositories(); if (rootProject.getRepositories().findByName(repoName) == null) { - // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back - rootProject.getRepositories().ivy(ivyRepo -> { - ivyRepo.setName(repoName); - ivyRepo.setUrl("https://download.oracle.com"); - ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> - layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); - ivyRepo.content(content -> content.includeGroup("jdk")); - }); - // current pattern since 12.0.1 - rootProject.getRepositories().ivy(ivyRepo -> { - ivyRepo.setName(repoName + "_with_hash"); - ivyRepo.setUrl("https://download.oracle.com"); - ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> layout.artifact( - "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); - ivyRepo.content(content 
-> content.includeGroup("jdk")); - }); + if (hash != null) { + // current pattern since 12.0.1 + repositories.ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> layout.artifact( + "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("jdk")); + }); + } else { + // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back + repositories.ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> + layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("jdk")); + }); + } } // add the jdk as a "dependency" diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java index 5f982e1b47d93..9d612da610aca 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java @@ -41,20 +41,33 @@ public class JdkDownloadPluginIT extends GradleIntegrationTestCase { - private static final String FAKE_JDK_VERSION = "1.0.2+99"; + private static final String OLD_JDK_VERSION = "1+99"; + private static final String JDK_VERSION = "12.0.1+99@123456789123456789123456789abcde"; private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)"); private static final Pattern NUM_CONFIGS_LOGLINE = Pattern.compile("NUM CONFIGS: (.*)"); public void testLinuxExtraction() throws IOException { - assertExtraction("getLinuxJdk", "linux", "bin/java"); + assertExtraction("getLinuxJdk", "linux", "bin/java", JDK_VERSION); } public void testDarwinExtraction() throws IOException { - assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java"); + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", JDK_VERSION); } public void testWindowsExtraction() throws IOException { - assertExtraction("getWindowsJdk", "windows", "bin/java"); + assertExtraction("getWindowsJdk", "windows", "bin/java", JDK_VERSION); + } + + public void testLinuxExtractionOldVersion() throws IOException { + assertExtraction("getLinuxJdk", "linux", "bin/java", OLD_JDK_VERSION); + } + + public void testDarwinExtractionOldVersion() throws IOException { + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", OLD_JDK_VERSION); + } + + public void testWindowsExtractionOldVersion() throws IOException { + assertExtraction("getWindowsJdk", "windows", "bin/java", OLD_JDK_VERSION); } public void testCrossProjectReuse() throws IOException { @@ -62,39 +75,41 @@ public void testCrossProjectReuse() throws IOException { Matcher matcher = NUM_CONFIGS_LOGLINE.matcher(result.getOutput()); assertTrue("could not find num configs in output: " + result.getOutput(), matcher.find()); assertThat(Integer.parseInt(matcher.group(1)), equalTo(6)); // 3 import configs, 3 export configs - }); + }, JDK_VERSION); } - public void assertExtraction(String taskname, String platform, String javaBin) throws IOException { + public void assertExtraction(String taskname, String platform, String javaBin, String version) throws 
IOException { runBuild(taskname, platform, result -> { Matcher matcher = JDK_HOME_LOGLINE.matcher(result.getOutput()); assertTrue("could not find jdk home in output: " + result.getOutput(), matcher.find()); String jdkHome = matcher.group(1); Path javaPath = Paths.get(jdkHome, javaBin); assertTrue(javaPath.toString(), Files.exists(javaPath)); - }); + }, version); } - private void runBuild(String taskname, String platform, Consumer assertions) throws IOException { + private void runBuild(String taskname, String platform, Consumer assertions, String version) throws IOException { WireMockServer wireMock = new WireMockServer(0); try { String extension = platform.equals("windows") ? "zip" : "tar.gz"; - String filename = "openjdk-1.0.2_" + platform + "-x64_bin." + extension; - wireMock.stubFor(head(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename)) - .willReturn(aResponse().withStatus(200))); + boolean isOld = version.equals(OLD_JDK_VERSION); + String filename = "openjdk-" + (isOld ? "1" : "12.0.1") + "_" + platform + "-x64_bin." + extension; final byte[] filebytes; - try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream(filename)) { + try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream("fake_openjdk_" + platform + "." + extension)) { filebytes = stream.readAllBytes(); } - wireMock.stubFor(get(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename)) - .willReturn(aResponse().withStatus(200).withBody(filebytes))); + String versionPath = isOld ? "jdk1/99" : "jdk12.0.1/123456789123456789123456789abcde/99"; + String urlPath = "/java/GA/" + versionPath + "/GPL/" + filename; + wireMock.stubFor(head(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200))); + wireMock.stubFor(get(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200).withBody(filebytes))); wireMock.start(); GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download")) .withArguments(taskname, "-Dlocal.repo.path=" + getLocalTestRepoPath(), - "-Dtests.jdk_version=" + FAKE_JDK_VERSION, - "-Dtests.jdk_repo=" + wireMock.baseUrl()) + "-Dtests.jdk_version=" + version, + "-Dtests.jdk_repo=" + wireMock.baseUrl(), + "-i") .withPluginClasspath(); BuildResult result = runner.build(); diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_linux.tar.gz similarity index 100% rename from buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz rename to buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_linux.tar.gz diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_osx.tar.gz similarity index 100% rename from buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz rename to buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_osx.tar.gz diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_windows.zip similarity index 100% rename from buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip rename to buildSrc/src/test/resources/org/elasticsearch/gradle/fake_openjdk_windows.zip From 9d94d575314289e88067bee4d2b88ff0dc05af5a Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 10 May 2019 09:07:50 +0100 Subject: [PATCH 064/321] [ML] 
Complete the Data Frame task on stop (#41752) Wait for indexer to stop then complete the persistent task on stop. If the wait_for_completion is true the request will not return until stopped. --- .../client/DataFrameTransformIT.java | 5 +- .../DataFrameTransformDocumentationIT.java | 3 +- .../DeleteDataFrameTransformAction.java | 87 ++---------- .../core/indexing/AsyncTwoPhaseIndexer.java | 24 +++- ...DataFrameTransformActionResponseTests.java | 22 --- .../indexing/AsyncTwoPhaseIndexerTests.java | 75 ++++++++-- .../integration/DataFrameIntegTestCase.java | 6 +- .../integration/DataFrameRestTestCase.java | 11 +- ...ansportDeleteDataFrameTransformAction.java | 96 +++++-------- ...portGetDataFrameTransformsStatsAction.java | 1 - .../TransportPutDataFrameTransformAction.java | 4 - ...TransportStopDataFrameTransformAction.java | 131 +++++++++++------- .../RestDeleteDataFrameTransformAction.java | 3 +- .../transforms/DataFrameTransformTask.java | 54 ++------ .../test/data_frame/transforms_start_stop.yml | 4 + 15 files changed, 249 insertions(+), 277 deletions(-) delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionResponseTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index f01db621bc2e0..1bd49154ee548 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -141,7 +141,8 @@ private void indexData(String indexName) throws IOException { @After public void cleanUpTransforms() throws IOException { for (String transformId : transformsToClean) { - highLevelClient().dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(transformId), RequestOptions.DEFAULT); + highLevelClient().dataFrame().stopDataFrameTransform( + new StopDataFrameTransformRequest(transformId, Boolean.TRUE, null), RequestOptions.DEFAULT); } for (String transformId : transformsToClean) { @@ -265,7 +266,7 @@ public void testStartStop() throws IOException { assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState()); - StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id); + StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = execute(stopRequest, client::stopDataFrameTransform, client::stopDataFrameTransformAsync); assertTrue(stopResponse.isStopped()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 3c5059279b44d..07713d5371460 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -76,7 +76,8 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest @After public void cleanUpTransforms() throws IOException { for (String transformId : transformsToClean) { - 
highLevelClient().dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(transformId), RequestOptions.DEFAULT); + highLevelClient().dataFrame().stopDataFrameTransform( + new StopDataFrameTransformRequest(transformId, Boolean.TRUE, TimeValue.timeValueSeconds(20)), RequestOptions.DEFAULT); } for (String transformId : transformsToClean) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java index 6b7de0ab80f3a..715fa0f5dc78b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java @@ -7,25 +7,18 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.support.tasks.BaseTasksRequest; -import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import java.io.IOException; -import java.util.Collections; -import java.util.List; import java.util.Objects; -public class DeleteDataFrameTransformAction extends Action { +public class DeleteDataFrameTransformAction extends Action { public static final DeleteDataFrameTransformAction INSTANCE = new DeleteDataFrameTransformAction(); public static final String NAME = "cluster:admin/data_frame/delete"; @@ -35,17 +28,21 @@ private DeleteDataFrameTransformAction() { } @Override - public Response newResponse() { + public AcknowledgedResponse newResponse() { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override - public Writeable.Reader getResponseReader() { - return Response::new; + public Writeable.Reader getResponseReader() { + return in -> { + AcknowledgedResponse response = new AcknowledgedResponse(); + response.readFrom(in); + return response; + }; } - public static class Request extends BaseTasksRequest { - private final String id; + public static class Request extends MasterNodeRequest { + private String id; public Request(String id) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); @@ -60,11 +57,6 @@ public String getId() { return id; } - @Override - public boolean match(Task task) { - return task.getDescription().equals(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -94,59 +86,4 @@ public boolean equals(Object obj) { return Objects.equals(id, other.id); } } - - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { - - private final boolean acknowledged; 
- - public Response(StreamInput in) throws IOException { - super(in); - acknowledged = in.readBoolean(); - } - - public Response(boolean acknowledged, List taskFailures, List nodeFailures) { - super(taskFailures, nodeFailures); - this.acknowledged = acknowledged; - } - - public Response(boolean acknowledged) { - this(acknowledged, Collections.emptyList(), Collections.emptyList()); - } - - public boolean isDeleted() { - return acknowledged; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(acknowledged); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - toXContentCommon(builder, params); - builder.field("acknowledged", acknowledged); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - DeleteDataFrameTransformAction.Response response = (DeleteDataFrameTransformAction.Response) o; - return super.equals(o) && acknowledged == response.acknowledged; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), acknowledged); - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index ec7e0de9e34fc..ccf075b13ae5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -22,9 +22,11 @@ * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on job position). * Only one background job can run simultaneously and {@link #onFinish} is called when the job - * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is - * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when - * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * finishes. {@link #onStop()} is called after the current search returns when the job is stopped early via a call + * to {@link #stop()}. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} + * is called if the indexer is aborted while a job is running. The indexer must be started ({@link #start()} + * to allow a background job to run when {@link #maybeTriggerAsyncJob(long)} is called. + * {@link #stop()} can be used to stop the background job without aborting the indexer. * * In a nutshell this is a 2 cycle engine: 1st it sends a query, 2nd it indexes documents based on the response, sends the next query, * indexes, queries, indexes, ... until a condition lets the engine pause until the source provides new input. @@ -84,8 +86,10 @@ public synchronized IndexerState start() { /** * Sets the internal state to {@link IndexerState#STOPPING} if an async job is - * running in the background. 
If there is no job running when this function is - * called, the state is directly set to {@link IndexerState#STOPPED}. + * running in the background, {@link #onStop()} will be called when the background job + * detects that the indexer is stopped. + * If there is no job running when this function is called + * the state is set to {@link IndexerState#STOPPED} and {@link #onStop()} called directly. * * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). */ @@ -94,6 +98,7 @@ public synchronized IndexerState stop() { if (previousState == IndexerState.INDEXING) { return IndexerState.STOPPING; } else if (previousState == IndexerState.STARTED) { + onStop(); return IndexerState.STOPPED; } else { return previousState; @@ -251,6 +256,14 @@ public synchronized boolean maybeTriggerAsyncJob(long now) { */ protected abstract void onFinish(ActionListener listener); + /** + * Called when the indexer is stopped. This is only called when the indexer is stopped + * via {@link #stop()} as opposed to {@link #onFinish(ActionListener)} which is called + * when the indexer's work is done. + */ + protected void onStop() { + } + /** * Called when a background job detects that the indexer is aborted causing the * async execution to stop. @@ -276,6 +289,7 @@ private IndexerState finishAndSetState() { case STOPPING: // must be started again + onStop(); return IndexerState.STOPPED; case ABORTING: diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionResponseTests.java deleted file mode 100644 index 54501fde5cfe8..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionResponseTests.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.core.dataframe.action; - -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Response; - -public class DeleteDataFrameTransformActionResponseTests extends AbstractWireSerializingDataFrameTestCase { - @Override - protected Response createTestInstance() { - return new Response(randomBoolean()); - } - - @Override - protected Reader instanceReader() { - return Response::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index b39c4f1a25a76..e56491bdb5764 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import java.io.IOException; import java.util.Collections; @@ -34,17 +35,26 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { AtomicBoolean isFinished = new AtomicBoolean(false); + AtomicBoolean isStopped = new AtomicBoolean(false); + + @Before + public void reset() { + isFinished.set(false); + isStopped.set(false); + } private class MockIndexer extends AsyncTwoPhaseIndexer { private final CountDownLatch latch; // test the execution order private volatile int step; + private final boolean stoppedBeforeFinished; protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition, - CountDownLatch latch) { + CountDownLatch latch, boolean stoppedBeforeFinished) { super(executor, initialState, initialPosition, new MockJobStats()); this.latch = latch; + this.stoppedBeforeFinished = stoppedBeforeFinished; } @Override @@ -57,7 +67,7 @@ protected IterationResult doProcess(SearchResponse searchResponse) { awaitForLatch(); assertThat(step, equalTo(3)); ++step; - return new IterationResult(Collections.emptyList(), 3, true); + return new IterationResult<>(Collections.emptyList(), 3, true); } private void awaitForLatch() { @@ -99,7 +109,8 @@ protected void doNextBulk(BulkRequest request, ActionListener next @Override protected void doSaveState(IndexerState state, Integer position, Runnable next) { - assertThat(step, equalTo(5)); + int expectedStep = stoppedBeforeFinished ? 
3 : 5; + assertThat(step, equalTo(expectedStep)); ++step; next.run(); } @@ -114,7 +125,12 @@ protected void onFinish(ActionListener listener) { assertThat(step, equalTo(4)); ++step; listener.onResponse(null); - isFinished.set(true); + assertTrue(isFinished.compareAndSet(false, true)); + } + + @Override + protected void onStop() { + assertTrue(isStopped.compareAndSet(false, true)); } @Override @@ -180,7 +196,7 @@ protected void doSaveState(IndexerState state, Integer position, Runnable next) protected void onFailure(Exception exc) { assertThat(step, equalTo(2)); ++step; - isFinished.set(true); + assertTrue(isFinished.compareAndSet(false, true)); } @Override @@ -209,10 +225,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); - isFinished.set(false); try { CountDownLatch countDownLatch = new CountDownLatch(1); - MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, false); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); @@ -220,7 +235,8 @@ public void testStateMachine() throws Exception { countDownLatch.countDown(); assertThat(indexer.getPosition(), equalTo(2)); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertTrue(awaitBusy(() -> isFinished.get())); + assertFalse(isStopped.get()); assertThat(indexer.getStep(), equalTo(6)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); @@ -234,18 +250,57 @@ public void testStateMachine() throws Exception { public void testStateMachineBrokenSearch() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); - isFinished.set(false); try { MockIndexerThrowsFirstSearch indexer = new MockIndexerThrowsFirstSearch(executor, state, 2); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertTrue(ESTestCase.awaitBusy(() -> isFinished.get(), 10000, TimeUnit.SECONDS)); + assertTrue(awaitBusy(() -> isFinished.get(), 10000, TimeUnit.SECONDS)); assertThat(indexer.getStep(), equalTo(3)); } finally { executor.shutdownNow(); } } + + public void testStop_AfterIndexerIsFinished() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + try { + CountDownLatch countDownLatch = new CountDownLatch(1); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, false); + indexer.start(); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + countDownLatch.countDown(); + assertTrue(awaitBusy(() -> isFinished.get())); + + indexer.stop(); + assertTrue(isStopped.get()); + assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); + } finally { + executor.shutdownNow(); + } + } + + public void testStop_WhileIndexing() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + try { + CountDownLatch countDownLatch = new 
CountDownLatch(1); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, true); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + indexer.stop(); + countDownLatch.countDown(); + + assertThat(indexer.getPosition(), equalTo(2)); + assertTrue(awaitBusy(() -> isStopped.get())); + assertFalse(isFinished.get()); + } finally { + executor.shutdownNow(); + } + } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index 84f3e05de5cd1..ba6a6137789a3 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -93,11 +93,11 @@ protected StartDataFrameTransformAction.Response startDataFrameTransform(String new StartDataFrameTransformAction.Request(id, false)).actionGet(); } - protected DeleteDataFrameTransformAction.Response deleteDataFrameTransform(String id) { - DeleteDataFrameTransformAction.Response response = client().execute(DeleteDataFrameTransformAction.INSTANCE, + protected AcknowledgedResponse deleteDataFrameTransform(String id) { + AcknowledgedResponse response = client().execute(DeleteDataFrameTransformAction.INSTANCE, new DeleteDataFrameTransformAction.Request(id)) .actionGet(); - if (response.isDeleted()) { + if (response.isAcknowledged()) { transformConfigs.remove(id); } return response; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 85c0ac44a69af..4344aa823b4cc 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -21,6 +21,7 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.junit.After; import org.junit.AfterClass; import java.io.IOException; @@ -272,16 +273,20 @@ protected static void deleteDataFrameTransform(String transformId) throws IOExce adminClient().performRequest(request); } - @AfterClass - public static void removeIndices() throws Exception { + @After + public void waitForDataFrame() throws Exception { wipeDataFrameTransforms(); waitForPendingDataFrameTasks(); + } + + @AfterClass + public static void removeIndices() throws Exception { // we might have disabled wiping indices, but now its time to get rid of them // note: can not use super.cleanUpCluster() as this method must be static wipeIndices(); } - protected static void wipeDataFrameTransforms() throws IOException, InterruptedException { + public void wipeDataFrameTransforms() throws IOException, InterruptedException { List> transformConfigs = 
getDataFrameTransforms(); for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java index 2cdc4009e785b..ac40334dfb443 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -5,93 +5,73 @@ */ package org.elasticsearch.xpack.dataframe.action; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Response; -import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; -import java.util.List; +import java.io.IOException; -public class TransportDeleteDataFrameTransformAction extends TransportTasksAction { +public class TransportDeleteDataFrameTransformAction extends TransportMasterNodeAction { private final DataFrameTransformsConfigManager transformsConfigManager; @Inject - public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, DataFrameTransformsConfigManager transformsConfigManager) { - super(DeleteDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, - Response::new, ThreadPool.Names.SAME); + public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, + ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, + DataFrameTransformsConfigManager transformsConfigManager) { + 
super(DeleteDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + Request::new, indexNameExpressionResolver); this.transformsConfigManager = transformsConfigManager; } @Override - protected Response newResponse(Request request, List tasks, List taskOperationFailures, - List failedNodeExceptions) { - assert tasks.size() + taskOperationFailures.size() == 1; - boolean cancelled = tasks.size() > 0 && tasks.stream().allMatch(Response::isDeleted); + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } - return new Response(cancelled, taskOperationFailures, failedNodeExceptions); + protected AcknowledgedResponse read(StreamInput in) throws IOException { + AcknowledgedResponse response = new AcknowledgedResponse(); + response.readFrom(in); + return response; } @Override - protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { - assert task.getTransformId().equals(request.getId()); - IndexerState state = task.getState().getIndexerState(); - if (state.equals(IndexerState.STOPPED)) { - task.onCancelled(); - transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap(r -> { - listener.onResponse(new Response(true)); - }, listener::onFailure)); + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { + listener.onFailure(new ElasticsearchStatusException("Cannot delete data frame [" + request.getId() + + "] as the task is running. Stop the task first", RestStatus.CONFLICT)); } else { - listener.onFailure(new IllegalStateException("Could not delete transform [" + request.getId() + "] because " - + "indexer state is [" + state + "]. Transform must be [" + IndexerState.STOPPED + "] before deletion.")); + // Task is not running, delete the configuration document + transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap( + r -> listener.onResponse(new AcknowledgedResponse(r)), + listener::onFailure)); } } @Override - protected void doExecute(Task task, Request request, ActionListener listener) { - final ClusterState state = clusterService.state(); - final DiscoveryNodes nodes = state.nodes(); - if (nodes.isLocalNodeElectedMaster()) { - PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { - super.doExecute(task, request, listener); - } else { - // we couldn't find the transform in the persistent task CS, but maybe the transform exists in the configuration index, - // if so delete the orphaned document and do not throw (for the normal case we want to stop the task first, - // than delete the configuration document if and only if the data frame transform is in stopped state) - transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap(r -> { - listener.onResponse(new Response(true)); - return; - }, listener::onFailure)); - } - } else { - // Delegates DeleteTransform to elected master node, so it becomes the coordinating node. - // Non-master nodes may have a stale cluster state that shows transforms which are cancelled - // on the master, which makes testing difficult. 
- if (nodes.getMasterNode() == null) { - listener.onFailure(new MasterNotDiscoveredException("no known master nodes")); - } else { - transportService.sendRequest(nodes.getMasterNode(), actionName, request, - new ActionListenerResponseHandler<>(listener, Response::new)); - } - } + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index 7ab5f28001407..bb01da4c7e50a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -132,7 +132,6 @@ protected void doExecute(Task task, Request request, ActionListener fi }, e -> { // If the index to search, or the individual config is not there, just return empty - logger.error("failed to expand ids", e); if (e instanceof ResourceNotFoundException) { finalListener.onResponse(new Response(Collections.emptyList())); } else { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index 0b8ef692cdd8c..997739b2407a7 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -6,8 +6,6 @@ package org.elasticsearch.xpack.dataframe.action; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; @@ -63,8 +61,6 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAction { - private static final Logger logger = LogManager.getLogger(TransportPutDataFrameTransformAction.class); - private final XPackLicenseState licenseState; private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 120f1ef77596b..26f5259c69dc8 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -5,64 +5,85 @@ */ package org.elasticsearch.xpack.dataframe.action; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import 
org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; - public class TransportStopDataFrameTransformAction extends TransportTasksAction { - private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); private final ThreadPool threadPool; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final PersistentTasksService persistentTasksService; @Inject public TransportStopDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, ThreadPool threadPool, + PersistentTasksService persistentTasksService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager) { super(StopDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, StopDataFrameTransformAction.Request::new, StopDataFrameTransformAction.Response::new, StopDataFrameTransformAction.Response::new, ThreadPool.Names.SAME); this.threadPool = threadPool; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.persistentTasksService = persistentTasksService; } @Override protected void doExecute(Task task, StopDataFrameTransformAction.Request request, ActionListener listener) { + final ClusterState state = clusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + if (nodes.isLocalNodeElectedMaster() == false) { + // Delegates stop data frame to elected master node so it becomes the coordinating node. 
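+            // The master holds the authoritative persistent-task metadata; the wait-for-stopped logic below (waitForDataFrameStopped) polls that metadata rather than the indexer state, so it must not run against a stale cluster state.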
+ if (nodes.getMasterNode() == null) { + listener.onFailure(new MasterNotDiscoveredException("no known master node")); + } else { + transportService.sendRequest(nodes.getMasterNode(), actionName, request, + new ActionListenerResponseHandler<>(listener, StopDataFrameTransformAction.Response::new)); + } + } else { + final ActionListener finalListener; + if (request.waitForCompletion()) { + finalListener = waitForStopListener(request, listener); + } else { + finalListener = listener; + } - dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( - expandedIds -> { - request.setExpandedIds(new HashSet<>(expandedIds)); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); - super.doExecute(task, request, listener); - }, - listener::onFailure - )); + dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( + expandedIds -> { + request.setExpandedIds(new HashSet<>(expandedIds)); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); + super.doExecute(task, request, finalListener); + }, + listener::onFailure + )); + } } @Override @@ -84,42 +105,9 @@ protected void taskOperation(StopDataFrameTransformAction.Request request, DataF RestStatus.CONFLICT)); return; } - if (request.waitForCompletion() == false) { - transformTask.stop(listener); - } else { - ActionListener blockingListener = ActionListener.wrap(response -> { - if (response.isStopped()) { - // The Task acknowledged that it is stopped/stopping... wait until the status actually - // changes over before returning. Switch over to Generic threadpool so - // we don't block the network thread - threadPool.generic().execute(() -> { - try { - long untilInNanos = System.nanoTime() + request.getTimeout().getNanos(); - - while (System.nanoTime() - untilInNanos < 0) { - if (transformTask.isStopped()) { - listener.onResponse(response); - return; - } - Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); - } - // ran out of time - listener.onFailure(new ElasticsearchTimeoutException( - DataFrameMessages.getMessage(DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT, - request.getTimeout().getStringRep(), request.getId()))); - } catch (InterruptedException e) { - listener.onFailure(new ElasticsearchException(DataFrameMessages.getMessage( - DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_INTERRUPT, request.getId()), e)); - } - }); - } else { - // Did not acknowledge stop, just return the response - listener.onResponse(response); - } - }, listener::onFailure); - - transformTask.stop(blockingListener); - } + + transformTask.stop(); + listener.onResponse(new StopDataFrameTransformAction.Response(Boolean.TRUE)); } else { listener.onFailure(new RuntimeException("ID of data frame indexer task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); @@ -139,4 +127,47 @@ protected StopDataFrameTransformAction.Response newResponse(StopDataFrameTransfo boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped); return new StopDataFrameTransformAction.Response(allStopped); } + + private ActionListener + waitForStopListener(StopDataFrameTransformAction.Request request, + ActionListener listener) { + + return ActionListener.wrap( + response -> { + // Wait until the persistent task is stopped + // Switch over to Generic threadpool so we don't block the network thread + 
threadPool.generic().execute(() -> + waitForDataFrameStopped(request.getExpandedIds(), request.getTimeout(), listener)); + }, + listener::onFailure + ); + } + + private void waitForDataFrameStopped(Collection persistentTaskIds, TimeValue timeout, + ActionListener listener) { + persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetaData -> { + + if (persistentTasksCustomMetaData == null) { + return true; + } + + for (String persistentTaskId : persistentTaskIds) { + if (persistentTasksCustomMetaData.getTask(persistentTaskId) != null) { + return false; + } + } + return true; + + }, timeout, new ActionListener<>() { + @Override + public void onResponse(Boolean result) { + listener.onResponse(new StopDataFrameTransformAction.Response(Boolean.TRUE)); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java index 183952e060338..125e61b5021e4 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; @@ -33,7 +34,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id); return channel -> client.execute(DeleteDataFrameTransformAction.INSTANCE, request, - new BaseTasksResponseToXContentListener<>(channel)); + new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 2020300a0cf77..c332d29945aaf 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; @@ -85,7 +84,7 @@ public DataFrameTransformTask(long id, String type, String action, TaskId parent String initialReason = null; long initialGeneration = 0; Map initialPosition = null; - logger.info("[{}] init, got state: [{}]", 
transform.getId(), state != null); + logger.trace("[{}] init, got state: [{}]", transform.getId(), state != null); if (state != null) { initialTaskState = state.getTaskState(); initialReason = state.getReason(); @@ -218,51 +217,17 @@ public synchronized void start(ActionListener listener) { )); } - public synchronized void stop(ActionListener listener) { + public synchronized void stop() { if (getIndexer() == null) { - listener.onFailure(new ElasticsearchException("Task for transform [{}] not fully initialized. Try again later", - getTransformId())); return; } // taskState is initialized as STOPPED and is updated in tandem with the indexerState // Consequently, if it is STOPPED, we consider the whole task STOPPED. if (taskState.get() == DataFrameTransformTaskState.STOPPED) { - listener.onResponse(new StopDataFrameTransformAction.Response(true)); return; } - final IndexerState newState = getIndexer().stop(); - switch (newState) { - case STOPPED: - // Fall through to `STOPPING` as the behavior is the same for both, we should persist for both - case STOPPING: - // update the persistent state to STOPPED. There are two scenarios and both are safe: - // 1. we persist STOPPED now, indexer continues a bit then sees the flag and checkpoints another STOPPED with the more recent - // position. - // 2. we persist STOPPED now, indexer continues a bit but then dies. When/if we resume we'll pick up at last checkpoint, - // overwrite some docs and eventually checkpoint. - taskState.set(DataFrameTransformTaskState.STOPPED); - DataFrameTransformState state = new DataFrameTransformState( - DataFrameTransformTaskState.STOPPED, - IndexerState.STOPPED, - getIndexer().getPosition(), - currentCheckpoint.get(), - stateReason.get(), - getIndexer().getProgress()); - persistStateToClusterState(state, ActionListener.wrap( - task -> { - auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); - listener.onResponse(new StopDataFrameTransformAction.Response(true)); - }, - exc -> listener.onFailure(new ElasticsearchException( - "Error while updating state for data frame transform [{}] to [{}]", exc, - transform.getId(), - state.getIndexerState())))); - break; - default: - listener.onFailure(new ElasticsearchException("Cannot stop task for data frame transform [{}], because state was [{}]", - transform.getId(), newState)); - break; - } + + getIndexer().stop(); } @Override @@ -280,12 +245,10 @@ public synchronized void triggered(Event event) { /** * Attempt to gracefully cleanup the data frame transform so it can be terminated. 
- * This tries to remove the job from the scheduler, and potentially any other - * cleanup operations in the future + * This tries to remove the job from the scheduler and completes the persistent task */ synchronized void shutdown() { try { - logger.info("Data frame indexer [" + transform.getId() + "] received abort request, stopping indexer."); schedulerEngine.remove(SCHEDULE_NAME + "_" + transform.getId()); schedulerEngine.unregister(this); } catch (Exception e) { @@ -612,6 +575,13 @@ protected void onFinish(ActionListener listener) { } } + @Override + protected void onStop() { + auditor.info(transformConfig.getId(), "Indexer has stopped"); + logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); + transformTask.shutdown(); + } + @Override protected void onAbort() { auditor.info(transformConfig.getId(), "Received abort request, stopping indexer"); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index f1ac07b72340c..1e9223b79f201 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -106,6 +106,7 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" + wait_for_completion: true - match: { stopped: true } - do: @@ -199,6 +200,7 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-later" + wait_for_completion: true - match: { stopped: true } - do: @@ -232,6 +234,8 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "_all" + wait_for_completion: true + - match: { stopped: true } - do: From a3cc03eb1503df24c1706a721fcc9af38c3b2873 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 10 May 2019 14:19:26 +0300 Subject: [PATCH 065/321] Docs: Documentation for the upcoming SQL support of frozen indices (#41863) Add docs for SQL Frozen indices support --- docs/reference/sql/endpoints/jdbc.asciidoc | 4 ++ docs/reference/sql/endpoints/rest.asciidoc | 4 ++ docs/reference/sql/language/index.asciidoc | 2 +- ...dex-patterns.asciidoc => indices.asciidoc} | 36 +++++++++++++++++- .../syntax/commands/show-tables.asciidoc | 10 +++-- .../xpack/sql/qa/jdbc/DataLoader.java | 3 ++ .../qa/src/main/resources/docs/docs.csv-spec | 30 +++++++++++++++ .../logical/command/sys/SysTablesTests.java | 38 +++++++++++++++++-- 8 files changed, 116 insertions(+), 11 deletions(-) rename docs/reference/sql/language/{index-patterns.asciidoc => indices.asciidoc} (65%) diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 33f130a891896..4e9de8934895a 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -136,6 +136,10 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que `field.multi.value.leniency` (default `true`):: Whether to be lenient and return the first value (without any guarantees of what that will be - typically the first in natural ascending order) for fields with multiple values (true) or throw an exception. +[float] +==== Index +`index.include.frozen` (default `false`):: Whether to include <> in the query execution or not (default). 
+ [float] ==== Additional diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index c55b379bfffe6..e44649f3a8767 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -360,6 +360,10 @@ More information available https://docs.oracle.com/javase/8/docs/api/java/time/Z |false |Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order). +|index_include_frozen +|false +|Whether to include <> in the query execution or not (default). + |=== Do note that most parameters (outside the timeout and `columnar` ones) make sense only during the initial query - any follow-up pagination request only requires the `cursor` parameter as explained in the <> chapter. diff --git a/docs/reference/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc index 6ea6a15b3ed64..2ae33d32f84ee 100644 --- a/docs/reference/sql/language/index.asciidoc +++ b/docs/reference/sql/language/index.asciidoc @@ -13,4 +13,4 @@ This chapter describes the SQL syntax and semantics supported namely: include::syntax/lexic/index.asciidoc[] include::syntax/commands/index.asciidoc[] include::data-types.asciidoc[] -include::index-patterns.asciidoc[] +include::indices.asciidoc[] diff --git a/docs/reference/sql/language/index-patterns.asciidoc b/docs/reference/sql/language/indices.asciidoc similarity index 65% rename from docs/reference/sql/language/index-patterns.asciidoc rename to docs/reference/sql/language/indices.asciidoc index 44f951d36a028..5a3b5514931a2 100644 --- a/docs/reference/sql/language/index-patterns.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -5,7 +5,9 @@ {es-sql} supports two types of patterns for matching multiple indices or tables: -* {es} multi-index +[[sql-index-patterns-multi]] +[float] +=== {es} multi-index The {es} notation for enumerating, including or excluding <> is supported _as long_ as it is quoted or escaped as a table identifier. @@ -33,7 +35,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] NOTE: There is the restriction that all resolved concrete tables have the exact same mapping. -* SQL `LIKE` notation +[[sql-index-patterns-like]] +[float] +=== SQL `LIKE` notation The common `LIKE` statement (including escaping if needed) to match a wildcard pattern, based on one `_` or multiple `%` characters. @@ -81,3 +85,31 @@ Which one to use, is up to you however try to stick to the same one across your NOTE: As the query type of quoting between the two patterns is fairly similar (`"` vs `'`), {es-sql} _always_ requires the keyword `LIKE` for SQL `LIKE` pattern. +[[sql-index-frozen]] +== Frozen Indices + +{es} <> are a useful and powerful tool for hot/warm architecture introduced in {es} 6.6, +essentially by trading speed for memory. +{es-sql} supports frozen indices and similar to {es}, due to their performance characteristics, allows searches on them only +when explicitly told so by user - in other words, by default, frozen indices are not included in searches. + +One can toggle the use of frozen indices through: + +::dedicated configuration parameter +Set to `true` properties `index_include_frozen` in the <> or `index.include.frozen` in the drivers to include frozen indices. 
+ +::dedicated keyword +Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the `FROM` clause or `INCLUDE FROZEN` in the `SHOW ` commands: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesIncludeFrozen] +---- + + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableIncludeFrozen] +---- + +Unless enabled, frozen indices are completely ignored; it is as if they do not exist and as such, queries ran against them are likely to fail. \ No newline at end of file diff --git a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc index 691d328aa4bdd..554819e24b178 100644 --- a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc @@ -7,12 +7,14 @@ [source, sql] ---- SHOW TABLES - [table identifier | <1> - [LIKE pattern ]]? <2> + [INCLUDE FROZEN]? <1> + [table identifier | <2> + [LIKE pattern ]]? <3> ---- -<1> single table identifier or double quoted es multi index -<2> SQL LIKE pattern +<1> Whether or not to include frozen indices +<2> single table identifier or double quoted es multi index +<3> SQL LIKE pattern See <> for more information about patterns. diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index fe2e84e962fd3..774a406da863c 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -58,6 +58,9 @@ public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { loadEmpDatasetIntoEs(client, "emp", "employees"); loadLibDatasetIntoEs(client, "library"); makeAlias(client, "employees", "emp"); + // frozen index + loadLibDatasetIntoEs(client, "archive"); + freeze(client, "archive"); } private static void createString(String name, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 2fe719e8cb485..c2432007bff35 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -172,6 +172,25 @@ employees |VIEW |ALIAS // end::showTablesEsMultiIndex ; + +// +// include FROZEN +// +showTablesIncludeFrozen +// tag::showTablesIncludeFrozen +SHOW TABLES INCLUDE FROZEN; + + name | type | kind +---------------+---------------+--------------- +archive |BASE TABLE |FROZEN INDEX +emp |BASE TABLE |INDEX +employees |VIEW |ALIAS +library |BASE TABLE |INDEX + +// end::showTablesIncludeFrozen +; + + /////////////////////////////// // // Show Functions @@ -463,6 +482,17 @@ SELECT * FROM "emp" LIMIT 1; // end::fromTableQuoted ; +fromTableIncludeFrozen +// tag::fromTableIncludeFrozen +SELECT * FROM FROZEN archive LIMIT 1; + + author | name | page_count | release_date +-----------------+--------------------+---------------+-------------------- +James S.A. 
Corey |Leviathan Wakes |561 |2011-06-02T00:00:00Z + +// end::fromTableIncludeFrozen +; + fromTableQuoted // tag::fromTablePatternQuoted SELECT emp_no FROM "e*p" LIMIT 1; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index be32e8e81f9b1..d4db97aba09cf 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -19,13 +19,17 @@ import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.parser.SqlParser; import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.type.DataTypes; import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.util.Comparator; import java.util.Iterator; @@ -52,6 +56,9 @@ public class SysTablesTests extends ESTestCase { private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); private final IndexInfo frozen = new IndexInfo("frozen", IndexType.FROZEN_INDEX); + private final Configuration FROZEN_CFG = new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, + Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, null, null, null, false, true); + // // catalog enumeration // @@ -150,6 +157,20 @@ public void testSysTablesNoTypes() throws Exception { }, index, alias); } + public void testSysTablesNoTypesAndFrozen() throws Exception { + executeCommand("SYS TABLES", r -> { + assertEquals(3, r.size()); + assertEquals("frozen", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, FROZEN_CFG, index, alias, frozen); + } + public void testSysTablesWithLegacyTypes() throws Exception { executeCommand("SYS TABLES TYPE 'TABLE', 'ALIAS'", r -> { assertEquals(2, r.size()); @@ -327,7 +348,7 @@ private SqlTypedParamValue param(Object value) { return new SqlTypedParamValue(DataTypes.fromJava(value).typeName, value); } - private Tuple sql(String sql, List params) { + private Tuple sql(String sql, List params, Configuration cfg) { EsIndex test = new EsIndex("test", mapping); Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), new Verifier(new Metrics())); @@ -336,7 +357,7 @@ private Tuple sql(String sql, List para IndexResolver resolver = mock(IndexResolver.class); when(resolver.clusterName()).thenReturn(CLUSTER_NAME); - SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); + SqlSession session = new SqlSession(cfg, null, null, resolver, null, null, null, null, null); return new Tuple<>(cmd, session); } @@ -344,10 +365,19 @@ 
private void executeCommand(String sql, Consumer consumer, IndexIn
         executeCommand(sql, emptyList(), consumer, infos);
     }
 
-    @SuppressWarnings({ "unchecked", "rawtypes" })
+    private void executeCommand(String sql, Consumer consumer, Configuration cfg, IndexInfo... infos) throws Exception {
+        executeCommand(sql, emptyList(), consumer, cfg, infos);
+    }
+
     private void executeCommand(String sql, List params, Consumer consumer,
             IndexInfo... infos) throws Exception {
-        Tuple tuple = sql(sql, params);
+        executeCommand(sql, params, consumer, TestUtils.TEST_CFG, infos);
+    }
+
+    @SuppressWarnings({ "unchecked", "rawtypes" })
+    private void executeCommand(String sql, List params, Consumer consumer, Configuration cfg,
+                                IndexInfo... infos) throws Exception {
+        Tuple tuple = sql(sql, params, cfg);
 
         IndexResolver resolver = tuple.v2().indexResolver();

From 044af2a4c4712bff4538b5eeee7b0f54dc2aba82 Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Fri, 10 May 2019 14:24:42 +0300
Subject: [PATCH 066/321] Fix slow sync test clusters artifacts task (#42012)

* Fix slow sync test clusters artifacts task

The task was mistakenly adding a combinational explosion of task actions
all doing the same thing. With this PR this is fixed and each
version - distribution pair is only extracted once.

I apologize for the SSD wear.

* Look for configurations on the root project

* Add dependency on configurations

* This should be a `copy` so we don't blow away all the other distros

* Don't copy example plugin build directory in integration tests
---
 .../testclusters/TestClustersPlugin.java      | 61 +++++++++++--------
 .../gradle/BuildExamplePluginsIT.java         |  3 +-
 2 files changed, 36 insertions(+), 28 deletions(-)

diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
index c1ed6b770f04c..daca1f5ebb191 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
@@ -182,8 +182,9 @@ private void configureClaimClustersHook(Project project) {
                     claimsInventory.put(elasticsearchCluster, claimsInventory.getOrDefault(elasticsearchCluster, 0) + 1);
                 }
             }));
-
-            logger.info("Claims inventory: {}", claimsInventory);
+            if (claimsInventory.isEmpty() == false) {
+                logger.info("Claims inventory: {}", claimsInventory);
+            }
         });
     }
 
@@ -279,8 +280,14 @@ private static void autoConfigureClusterDependencies(
         // the clusters will look for artifacts there based on the naming conventions.
         // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in
         // the build.
- Task sync = Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> { + Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> { onCreate.getOutputs().dir(getExtractDir(rootProject)); + onCreate.getInputs().files( + project.getRootProject().getConfigurations().matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) + ); + onCreate.dependsOn(project.getRootProject().getConfigurations() + .matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) + ); // NOTE: Gradle doesn't allow a lambda here ( fails at runtime ) onCreate.doFirst(new Action() { @Override @@ -290,6 +297,31 @@ public void execute(Task task) { project.delete(getExtractDir(rootProject)); } }); + onCreate.doLast(new Action() { + @Override + public void execute(Task task) { + project.getRootProject().getConfigurations() + .matching(config -> config.getName().startsWith(HELPER_CONFIGURATION_PREFIX)) + .forEach(config -> project.copy(spec -> + config.getResolvedConfiguration() + .getResolvedArtifacts() + .forEach(resolvedArtifact -> { + final FileTree files; + File file = resolvedArtifact.getFile(); + if (file.getName().endsWith(".zip")) { + files = project.zipTree(file); + } else if (file.getName().endsWith("tar.gz")) { + files = project.tarTree(file); + } else { + throw new IllegalArgumentException("Can't extract " + file + " unknown file extension"); + } + logger.info("Extracting {}@{}", resolvedArtifact, config); + spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup())); + spec.into(getExtractDir(project)); + })) + ); + } + }); }); // When the project evaluated we know of all tasks that use clusters. @@ -347,29 +379,6 @@ public void execute(Task task) { distribution.getFileExtension()); } - - sync.getInputs().files(helperConfiguration); - // NOTE: Gradle doesn't allow a lambda here ( fails at runtime ) - sync.doLast(new Action() { - @Override - public void execute(Task task) { - project.copy(spec -> - helperConfiguration.getResolvedConfiguration().getResolvedArtifacts().forEach(resolvedArtifact -> { - final FileTree files; - File file = resolvedArtifact.getFile(); - if (file.getName().endsWith(".zip")) { - files = project.zipTree(file); - } else if (file.getName().endsWith("tar.gz")) { - files = project.tarTree(file); - } else { - throw new IllegalArgumentException("Can't extract " + file + " unknown file extension"); - } - - spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup())); - spec.into(getExtractDir(project)); - })); - } - }); }))); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 762bcc5ff9b31..bf982fa3aa2d2 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -75,7 +75,7 @@ public static Iterable parameters() { } public void testCurrentExamplePlugin() throws IOException { - FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot()); + FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot(), pathname -> pathname.getPath().contains("/build/") == false); adaptBuildScriptForTest(); @@ -156,5 +156,4 @@ private Path writeBuildScript(String script) { throw new RuntimeException(e); } } - } From 40b32cdcb2978c9868d290be1d6c7f96c3389902 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 10 May 2019 08:49:53 -0400 Subject: [PATCH 
067/321] Wait for active shard after close in mixed cluster (#42029) The segment stats can be null in a mixed cluster because we do not wait for active shards after closing an index in 7.x. Closes #40331 --- .../rest-api-spec/test/indices.stats/30_segments.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/30_segments.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/30_segments.yml index 2d4d804063220..e4e820ff2b8bb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/30_segments.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/30_segments.yml @@ -1,9 +1,5 @@ --- setup: - - skip: - version: "all" - reason: "AwaitsFix: https://github.com/elastic/elasticsearch/issues/40331" - - do: indices.create: index: test @@ -47,6 +43,7 @@ setup: - do: indices.close: index: test + wait_for_active_shards: all - do: indices.stats: From 80f4846fdb1798b664b95f2388e0ce2696c28c4a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 10 May 2019 08:50:55 -0400 Subject: [PATCH 068/321] shouldRollGeneration should execute under read lock (#41696) Translog#shouldRollGeneration should execute under the read lock since it accesses the current writer. --- .../main/java/org/elasticsearch/index/translog/Translog.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 9a1e657199938..7626270b6cdc5 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -553,9 +553,10 @@ public Location add(final Operation operation) throws IOException { * @return {@code true} if the current generation should be rolled to a new generation */ public boolean shouldRollGeneration() { - final long size = this.current.sizeInBytes(); final long threshold = this.indexSettings.getGenerationThresholdSize().getBytes(); - return size > threshold; + try (ReleasableLock ignored = readLock.acquire()) { + return this.current.sizeInBytes() > threshold; + } } /** From e56d557c75dd1eb4ea535fb985160d003b560d25 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 10 May 2019 13:55:27 +0100 Subject: [PATCH 069/321] Recognise direct buffers in heap size docs (#42070) This commit slightly reworks the recommendations in the docs about setting the heap size: * the "rules of thumb" are actually instructions that should be followed * the reason for setting `Xmx` to 50% of the heap size is more subtle than just leaving space for the filesystem cache * it is normal to see Elasticsearch using more memory than `Xmx` * replace `cutoff` and `limit` with `threshold` since all three terms are used interchangeably * since we recommend setting `Xmx` equal to `Xms`, avoid talking about setting `Xmx` in isolation Relates #41954 --- .../important-settings/heap-size.asciidoc | 52 +++++++++++-------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/docs/reference/setup/important-settings/heap-size.asciidoc b/docs/reference/setup/important-settings/heap-size.asciidoc index 77aa23b61df45..890a9786e09a5 100644 --- a/docs/reference/setup/important-settings/heap-size.asciidoc +++ b/docs/reference/setup/important-settings/heap-size.asciidoc @@ -7,42 +7,48 @@ to ensure that Elasticsearch has enough heap available. 
Elasticsearch will assign the entire heap specified in <> via the `Xms` (minimum heap size) and `Xmx` (maximum -heap size) settings. +heap size) settings. You should set these two settings to be equal to each +other. The value for these setting depends on the amount of RAM available on your -server. Good rules of thumb are: +server: -* Set the minimum heap size (`Xms`) and maximum heap size (`Xmx`) to be equal to - each other. +* Set `Xmx` and `Xms` to no more than 50% of your physical RAM. {es} requires + memory for purposes other than the JVM heap and it is important to leave + space for this. For instance, {es} uses off-heap buffers for efficient + network communication, relies on the operating system's filesystem cache for + efficient access to files, and the JVM itself requires some memory too. It is + normal to observe the {es} process using more memory than the limit + configured with the `Xmx` setting. -* The more heap available to Elasticsearch, the more memory it can use for - caching. But note that too much heap can subject you to long garbage - collection pauses. - -* Set `Xmx` to no more than 50% of your physical RAM, to ensure that there is - enough physical RAM left for kernel file system caches. - -* Don’t set `Xmx` to above the cutoff that the JVM uses for compressed object - pointers (compressed oops); the exact cutoff varies but is near 32 GB. You can - verify that you are under the limit by looking for a line in the logs like the - following: +* Set `Xmx` and `Xms` to no more than the threshold that the JVM uses for + compressed object pointers (compressed oops); the exact threshold varies but + is near 32 GB. You can verify that you are under the threshold by looking for a + line in the logs like the following: + heap size [1.9gb], compressed ordinary object pointers [true] -* Even better, try to stay below the threshold for zero-based compressed oops; - the exact cutoff varies but 26 GB is safe on most systems, but can be as large - as 30 GB on some systems. You can verify that you are under the limit by - starting Elasticsearch with the JVM options `-XX:+UnlockDiagnosticVMOptions - -XX:+PrintCompressedOopsMode` and looking for a line like the following: +* Ideally set `Xmx` and `Xms` to no more than the threshold for zero-based + compressed oops; the exact threshold varies but 26 GB is safe on most + systems, but can be as large as 30 GB on some systems. You can verify that + you are under this threshold by starting {es} with the JVM options + `-XX:+UnlockDiagnosticVMOptions -XX:+PrintCompressedOopsMode` and looking for + a line like the following: + -- heap address: 0x000000011be00000, size: 27648 MB, zero based Compressed Oops -showing that zero-based compressed oops are enabled instead of +showing that zero-based compressed oops are enabled. If zero-based compressed +oops are not enabled then you will see a line like the following instead: heap address: 0x0000000118400000, size: 28672 MB, Compressed Oops with base: 0x00000001183ff000 -- +The more heap available to {es}, the more memory it can use for its internal +caches, but the less memory it leaves available for the operating system to use +for the filesystem cache. Also, larger heaps can cause longer garbage +collection pauses. + Here are examples of how to set the heap size via the jvm.options file: [source,txt] @@ -66,7 +72,7 @@ ES_JAVA_OPTS="-Xms4000m -Xmx4000m" ./bin/elasticsearch <2> <2> Set the minimum and maximum heap size to 4000 MB. NOTE: Configuring the heap for the <> is -different than the above. 
The values initially populated for the Windows service
-can be configured as above but are different after the service has been
+different than the above. The values initially populated for the Windows
+service can be configured as above but are different after the service has been
 installed. Consult the <> for additional details.

From 860b5c2e2a158a2cc47ebfc839a775d79813e7da Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Fri, 10 May 2019 22:59:38 +1000
Subject: [PATCH 070/321] Increase the sample space for random inner hits name generator (#42057)

This commit changes the minimum length for inner hits name to avoid name
collision which sometimes failed the test.
---
 .../elasticsearch/index/query/InnerHitBuilderTests.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java
index 54d478a7f6aec..db32c251fd3f4 100644
--- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java
@@ -146,7 +146,7 @@ public static InnerHitBuilder randomNestedInnerHits() {
     }
     public static InnerHitBuilder randomInnerHits() {
         InnerHitBuilder innerHits = new InnerHitBuilder();
-        innerHits.setName(randomAlphaOfLengthBetween(1, 16));
+        innerHits.setName(randomAlphaOfLengthBetween(5, 16));
         innerHits.setFrom(randomIntBetween(0, 32));
         innerHits.setSize(randomIntBetween(0, 32));
         innerHits.setExplain(randomBoolean());

From acc36fa5776e23904ffae0ce5e7db36a60b2e5d2 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Fri, 10 May 2019 14:02:09 +0100
Subject: [PATCH 071/321] Remove extra `ms` from log message (#42068)

This log message logs a `TimeValue` which includes units, but also logs an
extra `ms`. This commit removes the extra `ms`.
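To see why the literal is redundant, here is a minimal standalone sketch
(illustrative only, not part of the patch below; the class name and sample
duration are invented for the demonstration):

    import org.elasticsearch.common.unit.TimeValue;

    public class TimeValueUnitsDemo {
        public static void main(String[] args) {
            TimeValue elapsed = TimeValue.timeValueMillis(90_000);
            // TimeValue#toString() renders its own unit ("1.5m" here), so a
            // literal "ms" after the placeholder duplicates the unit:
            System.out.println("last failed join attempt was " + elapsed + " ms ago"); // ... 1.5m ms ago
            System.out.println("last failed join attempt was " + elapsed + " ago");    // ... 1.5m ago
        }
    }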
--- .../java/org/elasticsearch/cluster/coordination/JoinHelper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 0d4dbb2e688fc..3a52324661fc0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -208,7 +208,7 @@ static Level getLogLevel(TransportException e) { } void logWarnWithTimestamp() { - logger.info(() -> new ParameterizedMessage("last failed join attempt was {} ms ago, failed to join {} with {}", + logger.info(() -> new ParameterizedMessage("last failed join attempt was {} ago, failed to join {} with {}", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - timestamp)), destination, joinRequest), From c1d31f606425372f3059bf83ef57079406429cd7 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 10 May 2019 08:14:50 -0500 Subject: [PATCH 072/321] [ML] properly nesting objects in document source (#41901) * [ML] properly nesting objects in document source * Throw exception on agg extraction failure, cause it to fail df * throwing error to stop df if unsupported agg is found --- .../transforms/DataFrameTransformTask.java | 3 +- .../pivot/AggregationResultUtils.java | 59 +++++++++++++++++-- .../pivot/AggregationResultUtilsTests.java | 46 +++++++++++++++ .../test/data_frame/preview_transforms.yml | 12 +++- 4 files changed, 112 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index c332d29945aaf..bfe0e4f4d77b1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; import java.util.Arrays; import java.util.Map; @@ -606,7 +607,7 @@ protected void createCheckpoint(ActionListener listener) { } private boolean isIrrecoverableFailure(Exception e) { - return e instanceof IndexNotFoundException; + return e instanceof IndexNotFoundException || e instanceof AggregationResultUtils.AggregationExtractionException; } synchronized void handleFailure(Exception e) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index b17a65fc4daf1..8c4fa96a144ec 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; 
import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; @@ -29,7 +30,7 @@ import static org.elasticsearch.xpack.dataframe.transforms.pivot.SchemaUtil.isNumericType; -final class AggregationResultUtils { +public final class AggregationResultUtils { private static final Logger logger = LogManager.getLogger(AggregationResultUtils.class); /** @@ -77,17 +78,18 @@ public static Stream> extractCompositeAggregationResults(Com // gather the `value` type, otherwise utilize `getValueAsString` so we don't lose formatted outputs. if (isNumericType(fieldType) || (aggResultSingleValue.getValueAsString().equals(String.valueOf(aggResultSingleValue.value())))) { - document.put(aggName, aggResultSingleValue.value()); + updateDocument(document, aggName, aggResultSingleValue.value()); } else { - document.put(aggName, aggResultSingleValue.getValueAsString()); + updateDocument(document, aggName, aggResultSingleValue.getValueAsString()); } } else if (aggResult instanceof ScriptedMetric) { - document.put(aggName, ((ScriptedMetric) aggResult).aggregation()); + updateDocument(document, aggName, ((ScriptedMetric) aggResult).aggregation()); } else { // Execution should never reach this point! // Creating transforms with unsupported aggregations shall not be possible - logger.error("Dataframe Internal Error: unsupported aggregation ["+ aggResult.getName() +"], ignoring"); - assert false; + throw new AggregationExtractionException("unsupported aggregation [{}] with name [{}]", + aggResult.getType(), + aggResult.getName()); } } @@ -97,4 +99,49 @@ public static Stream> extractCompositeAggregationResults(Com }); } + @SuppressWarnings("unchecked") + static void updateDocument(Map document, String fieldName, Object value) { + String[] fieldTokens = fieldName.split("\\."); + if (fieldTokens.length == 1) { + document.put(fieldName, value); + return; + } + Map internalMap = document; + for (int i = 0; i < fieldTokens.length; i++) { + String token = fieldTokens[i]; + if (i == fieldTokens.length - 1) { + if (internalMap.containsKey(token)) { + if (internalMap.get(token) instanceof Map) { + throw new AggregationExtractionException("mixed object types of nested and non-nested fields [{}]", + fieldName); + } else { + throw new AggregationExtractionException("duplicate key value pairs key [{}] old value [{}] duplicate value [{}]", + fieldName, + internalMap.get(token), + value); + } + } + internalMap.put(token, value); + } else { + if (internalMap.containsKey(token)) { + if (internalMap.get(token) instanceof Map) { + internalMap = (Map)internalMap.get(token); + } else { + throw new AggregationExtractionException("mixed object types of nested and non-nested fields [{}]", + fieldName); + } + } else { + Map newMap = new HashMap<>(); + internalMap.put(token, newMap); + internalMap = newMap; + } + } + } + } + + public static class AggregationExtractionException extends ElasticsearchException { + AggregationExtractionException(String msg, Object... 
args) { + super(msg, args); + } + } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java index 7eb4295111324..1a835c9d19b59 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java @@ -66,6 +66,7 @@ import java.util.stream.Collectors; import static java.util.Arrays.asList; +import static org.hamcrest.CoreMatchers.equalTo; public class AggregationResultUtilsTests extends ESTestCase { @@ -736,6 +737,51 @@ aggTypedName, asMap( assertEquals(documentIdsFirstRun, documentIdsSecondRun); } + @SuppressWarnings("unchecked") + public void testUpdateDocument() { + Map document = new HashMap<>(); + + AggregationResultUtils.updateDocument(document, "foo.bar.baz", 1000L); + AggregationResultUtils.updateDocument(document, "foo.bar.baz2", 2000L); + AggregationResultUtils.updateDocument(document, "bar.field1", 1L); + AggregationResultUtils.updateDocument(document, "metric", 10L); + + assertThat(document.get("metric"), equalTo(10L)); + + Map bar = (Map)document.get("bar"); + + assertThat(bar.get("field1"), equalTo(1L)); + + Map foo = (Map)document.get("foo"); + Map foobar = (Map)foo.get("bar"); + + assertThat(foobar.get("baz"), equalTo(1000L)); + assertThat(foobar.get("baz2"), equalTo(2000L)); + } + + public void testUpdateDocumentWithDuplicate() { + Map document = new HashMap<>(); + + AggregationResultUtils.updateDocument(document, "foo.bar.baz", 1000L); + AggregationResultUtils.AggregationExtractionException exception = + expectThrows(AggregationResultUtils.AggregationExtractionException.class, + () -> AggregationResultUtils.updateDocument(document, "foo.bar.baz", 2000L)); + assertThat(exception.getMessage(), + equalTo("duplicate key value pairs key [foo.bar.baz] old value [1000] duplicate value [2000]")); + } + + public void testUpdateDocumentWithObjectAndNotObject() { + Map document = new HashMap<>(); + + AggregationResultUtils.updateDocument(document, "foo.bar.baz", 1000L); + AggregationResultUtils.AggregationExtractionException exception = + expectThrows(AggregationResultUtils.AggregationExtractionException.class, + () -> AggregationResultUtils.updateDocument(document, "foo.bar", 2000L)); + assertThat(exception.getMessage(), + equalTo("mixed object types of nested and non-nested fields [foo.bar]")); + } + + private void executeTest(GroupConfig groups, Collection aggregationBuilders, Collection pipelineAggregationBuilders, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index e0b35a64fb479..1d4a190b24e14 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -81,18 +81,28 @@ setup: "group_by": { "airline": {"terms": {"field": "airline"}}, "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, - "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.max": {"max": {"field": "time"}}, + "time.min": 
{"min": {"field": "time"}} + } } } - match: { preview.0.airline: foo } - match: { preview.0.by-hour: "2017-02-49 00" } - match: { preview.0.avg_response: 1.0 } + - match: { preview.0.time.max: "2017-02-18T00:30:00.000Z" } + - match: { preview.0.time.min: "2017-02-18T00:00:00.000Z" } - match: { preview.1.airline: bar } - match: { preview.1.by-hour: "2017-02-49 01" } - match: { preview.1.avg_response: 42.0 } + - match: { preview.1.time.max: "2017-02-18T01:00:00.000Z" } + - match: { preview.1.time.min: "2017-02-18T01:00:00.000Z" } - match: { preview.2.airline: foo } - match: { preview.2.by-hour: "2017-02-49 01" } - match: { preview.2.avg_response: 42.0 } + - match: { preview.2.time.max: "2017-02-18T01:01:00.000Z" } + - match: { preview.2.time.min: "2017-02-18T01:01:00.000Z" } --- "Test preview transform with invalid config": From 6d95386f0d4f5ac305976d2ed1771fa26a9130f0 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 10 May 2019 14:38:10 +0100 Subject: [PATCH 073/321] Simplify handling of keyword field normalizers (#42002) We have a number of places in analysis-handling code where we check if a field type is a keyword field, and if so then extract the normalizer rather than pulling the index-time analyzer. However, a keyword normalizer is really just a special case of an analyzer, so we should be able to simplify this by setting the normalizer as the index-time analyzer at construction time. --- .../subphase/highlight/AnnotatedTextHighlighter.java | 4 ++-- .../indices/analyze/TransportAnalyzeAction.java | 10 ++-------- .../index/mapper/KeywordFieldMapper.java | 3 ++- .../index/termvectors/TermVectorsService.java | 8 +------- .../fetch/subphase/highlight/HighlightUtils.java | 12 ------------ .../fetch/subphase/highlight/PlainHighlighter.java | 2 +- .../fetch/subphase/highlight/UnifiedHighlighter.java | 6 +++--- .../index/mapper/KeywordFieldMapperTests.java | 3 ++- 8 files changed, 13 insertions(+), 35 deletions(-) diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java index 2ba7838b90950..6b1a1c9254cf2 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java @@ -39,8 +39,8 @@ public class AnnotatedTextHighlighter extends UnifiedHighlighter { public static final String NAME = "annotated"; @Override - protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) { - return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type, hitContext), hitContext); + protected Analyzer getAnalyzer(DocumentMapper docMapper, HitContext hitContext) { + return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, hitContext), hitContext); } // Convert the marked-up values held on-disk to plain-text versions for highlighting diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 62d8c0e91da79..55bd593742667 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -50,9 +50,9 @@ import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.CustomAnalyzer; import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NormalizingCharFilterFactory; import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory; -import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -141,14 +141,8 @@ protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId } MappedFieldType fieldType = indexService.mapperService().fullName(request.field()); if (fieldType != null) { - if (fieldType.tokenized()) { + if (fieldType.tokenized() || fieldType instanceof KeywordFieldMapper.KeywordFieldType) { analyzer = fieldType.indexAnalyzer(); - } else if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) { - analyzer = ((KeywordFieldMapper.KeywordFieldType) fieldType).normalizer(); - if (analyzer == null) { - // this will be KeywordAnalyzer - analyzer = fieldType.indexAnalyzer(); - } } else { throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are only supported on tokenized fields"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 20b4bb37cc7aa..099ae9b2aa7a2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -235,13 +235,14 @@ public String typeName() { return CONTENT_TYPE; } - public NamedAnalyzer normalizer() { + private NamedAnalyzer normalizer() { return normalizer; } public void setNormalizer(NamedAnalyzer normalizer) { checkIfFrozen(); this.normalizer = normalizer; + setIndexAnalyzer(normalizer); } public boolean splitQueriesOnWhitespace() { diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index a05870b842f2d..b53c3d8da427c 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; @@ -235,12 +234,7 @@ private static Analyzer getAnalyzerAtField(IndexShard indexShard, String field, analyzer = mapperService.getIndexAnalyzers().get(perFieldAnalyzer.get(field).toString()); } else { MappedFieldType fieldType = mapperService.fullName(field); - if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) { - KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) fieldType; - analyzer = keywordFieldType.normalizer() == null ? 
keywordFieldType.indexAnalyzer() : keywordFieldType.normalizer(); - } else { - analyzer = fieldType.indexAnalyzer(); - } + analyzer = fieldType.indexAnalyzer(); } if (analyzer == null) { analyzer = mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer(); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java index 6ae302ee87a25..e9ec29cef4ae7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightUtils.java @@ -18,13 +18,10 @@ */ package org.elasticsearch.search.fetch.subphase.highlight; -import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.highlight.SimpleHTMLEncoder; import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; @@ -78,13 +75,4 @@ public static class Encoders { public static final Encoder HTML = new SimpleHTMLEncoder(); } - static Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) { - if (type instanceof KeywordFieldMapper.KeywordFieldType) { - KeywordFieldMapper.KeywordFieldType keywordFieldType = (KeywordFieldMapper.KeywordFieldType) type; - if (keywordFieldType.normalizer() != null) { - return keywordFieldType.normalizer(); - } - } - return docMapper.mappers().indexAnalyzer(); - } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java index ec5071706b031..6ad155104a4cc 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -101,7 +101,7 @@ public HighlightField highlight(HighlighterContext highlighterContext) { int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? 
1 : field.fieldOptions().numberOfFragments(); ArrayList fragsList = new ArrayList<>(); List textsToHighlight; - Analyzer analyzer = HighlightUtils.getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType); + Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().getType()).mappers().indexAnalyzer(); final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset(); try { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index 2a75e9c58f4fc..b806fb9cd312f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -70,7 +70,7 @@ public HighlightField highlight(HighlighterContext highlighterContext) { int numberOfFragments; try { - final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType, + final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), hitContext); List fieldValues = loadFieldValues(fieldType, field, context, hitContext); if (fieldValues.size() == 0) { @@ -150,8 +150,8 @@ protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchCont } - protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) { - return HighlightUtils.getAnalyzer(docMapper, type); + protected Analyzer getAnalyzer(DocumentMapper docMapper, HitContext hitContext) { + return docMapper.mappers().indexAnalyzer(); } protected List loadFieldValues(MappedFieldType fieldType, SearchContextHighlight.Field field, SearchContext context, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index dd7cb17ef127c..1bdf40bcc6708 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -401,7 +401,8 @@ public void testUpdateNormalizer() throws IOException { () -> indexService.mapperService().merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertEquals( - "Mapper for [field] conflicts with existing mapping:\n[mapper [field] has different [normalizer]]", + "Mapper for [field] conflicts with existing mapping:\n" + + "[mapper [field] has different [analyzer], mapper [field] has different [normalizer]]", e.getMessage()); } From f42dcf2ffd7bd25f3f91aa6127515f393cd1860f Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 10 May 2019 17:07:31 +0300 Subject: [PATCH 074/321] Docs: Tweak list formatting --- docs/reference/sql/language/indices.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 5a3b5514931a2..82c7f30fb041e 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -95,11 +95,11 @@ when explicitly told so by user - in other words, by default, frozen indices are One can toggle the use of frozen indices through: -::dedicated configuration parameter +dedicated configuration parameter:: Set to `true` properties 
`index_include_frozen` in the <> or `index.include.frozen` in the drivers to include frozen indices. -::dedicated keyword -Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the `FROM` clause or `INCLUDE FROZEN` in the `SHOW ` commands: +dedicated keyword:: +Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the `FROM` clause or `INCLUDE FROZEN` in the `SHOW` commands: ["source","sql",subs="attributes,callouts,macros"] ---- From da4899f7860c7cee145a094e71705fb0b12243fd Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 10 May 2019 09:31:48 -0500 Subject: [PATCH 075/321] [ML] adding pivot.max_search_page_size option for setting paging size (#41920) * [ML] adding pivot.size option for setting paging size * Changing field name to address PR comments * fixing ctor usage * adjust hlrc for field name change --- .../transforms/pivot/PivotConfig.java | 38 ++++++++++++++++--- .../transforms/pivot/PivotConfigTests.java | 4 +- .../DataFrameTransformDocumentationIT.java | 5 ++- .../dataframe/put_data_frame.asciidoc | 5 +++ .../xpack/core/dataframe/DataFrameField.java | 1 + .../action/PutDataFrameTransformAction.java | 8 ++++ .../transforms/pivot/PivotConfig.java | 24 ++++++++++-- .../transforms/pivot/PivotConfigTests.java | 8 +++- .../integration/DataFrameIntegTestCase.java | 8 +++- .../DataFrameTransformProgressIT.java | 4 +- .../dataframe/transforms/pivot/Pivot.java | 8 ++-- .../transforms/DataFrameIndexerTests.java | 29 +++++++++----- .../transforms/pivot/PivotTests.java | 14 ++++++- .../test/data_frame/transforms_crud.yml | 30 +++++++++++++++ 14 files changed, 154 insertions(+), 32 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java index 0c3a6e3ea890b..6fdbeb8a43a20 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java @@ -39,25 +39,29 @@ public class PivotConfig implements ToXContentObject { private static final ParseField GROUP_BY = new ParseField("group_by"); private static final ParseField AGGREGATIONS = new ParseField("aggregations"); + private static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size"); private final GroupConfig groups; private final AggregationConfig aggregationConfig; + private final Integer maxPageSearchSize; private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("pivot_config", true, - args -> new PivotConfig((GroupConfig) args[0], (AggregationConfig) args[1])); + args -> new PivotConfig((GroupConfig) args[0], (AggregationConfig) args[1], (Integer) args[2])); static { PARSER.declareObject(constructorArg(), (p, c) -> (GroupConfig.fromXContent(p)), GROUP_BY); PARSER.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p), AGGREGATIONS); + PARSER.declareInt(optionalConstructorArg(), MAX_PAGE_SEARCH_SIZE); } public static PivotConfig fromXContent(final XContentParser parser) { return PARSER.apply(parser, null); } - PivotConfig(GroupConfig groups, final AggregationConfig aggregationConfig) { + PivotConfig(GroupConfig groups, final AggregationConfig aggregationConfig, Integer maxPageSearchSize) { this.groups = groups; this.aggregationConfig = aggregationConfig; + 
this.maxPageSearchSize = maxPageSearchSize; } @Override @@ -65,6 +69,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(GROUP_BY.getPreferredName(), groups); builder.field(AGGREGATIONS.getPreferredName(), aggregationConfig); + if (maxPageSearchSize != null) { + builder.field(MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize); + } builder.endObject(); return builder; } @@ -77,6 +84,10 @@ public GroupConfig getGroupConfig() { return groups; } + public Integer getMaxPageSearchSize() { + return maxPageSearchSize; + } + @Override public boolean equals(Object other) { if (this == other) { @@ -89,12 +100,14 @@ public boolean equals(Object other) { final PivotConfig that = (PivotConfig) other; - return Objects.equals(this.groups, that.groups) && Objects.equals(this.aggregationConfig, that.aggregationConfig); + return Objects.equals(this.groups, that.groups) + && Objects.equals(this.aggregationConfig, that.aggregationConfig) + && Objects.equals(this.maxPageSearchSize, that.maxPageSearchSize); } @Override public int hashCode() { - return Objects.hash(groups, aggregationConfig); + return Objects.hash(groups, aggregationConfig, maxPageSearchSize); } public static Builder builder() { @@ -104,6 +117,7 @@ public static Builder builder() { public static class Builder { private GroupConfig groups; private AggregationConfig aggregationConfig; + private Integer maxPageSearchSize; /** * Set how to group the source data @@ -135,8 +149,22 @@ public Builder setAggregations(AggregatorFactories.Builder aggregations) { return this; } + /** + * Sets the maximum paging size (maxPageSearchSize) that the data frame transform can use when + * pulling the data from the source index. + * + * If an OOM is triggered, maxPageSearchSize is dynamically reduced so that the transform can continue to gather data. + * + * @param maxPageSearchSize Integer value between 10 and 10_000 + * @return the {@link Builder} with maxPageSearchSize set. + */ + public Builder setMaxPageSearchSize(Integer maxPageSearchSize) { + this.maxPageSearchSize = maxPageSearchSize; + return this; + } + public PivotConfig build() { - return new PivotConfig(groups, aggregationConfig); + return new PivotConfig(groups, aggregationConfig, maxPageSearchSize); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java index d2e036d9f1ad2..5cafcb9f419b5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java @@ -32,7 +32,9 @@ public class PivotConfigTests extends AbstractXContentTestCase { public static PivotConfig randomPivotConfig() { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), + AggregationConfigTests.randomAggregationConfig(), + randomBoolean() ?
null : randomIntBetween(10, 10_000)); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 07713d5371460..6f7832cbf3cff 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -138,8 +138,9 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException // end::put-data-frame-transform-agg-config // tag::put-data-frame-transform-pivot-config PivotConfig pivotConfig = PivotConfig.builder() - .setGroups(groupConfig) - .setAggregationConfig(aggConfig) + .setGroups(groupConfig) // <1> + .setAggregationConfig(aggConfig) // <2> + .setMaxPageSearchSize(1000) // <3> .build(); // end::put-data-frame-transform-pivot-config // tag::put-data-frame-transform-config diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc index bb1b20aaa1a52..567449c9c25b1 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc @@ -66,6 +66,11 @@ Defines the pivot function `group by` fields and the aggregation to reduce the d -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-pivot-config] -------------------------------------------------- +<1> The `GroupConfig` to use in the pivot +<2> The aggregations to use +<3> The maximum paging size for the transform when pulling data +from the source. The size dynamically adjusts as the transform +is running to recover from and prevent OOM issues. ===== GroupConfig The grouping terms. 
Defines the group by and destination fields diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java index 71bf14cdeb4a5..c61ed2ddde8be 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java @@ -27,6 +27,7 @@ public final class DataFrameField { public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DESTINATION = new ParseField("dest"); public static final ParseField FORCE = new ParseField("force"); + public static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size"); /** * Fields for checkpointing diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 059bad3494c07..2608fb87761f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -56,6 +56,14 @@ public static Request fromXContent(final XContentParser parser, final String id) @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; + if(config.getPivotConfig() != null + && config.getPivotConfig().getMaxPageSearchSize() != null + && (config.getPivotConfig().getMaxPageSearchSize() < 10 || config.getPivotConfig().getMaxPageSearchSize() > 10_000)) { + validationException = addValidationError( + "pivot.max_page_search_size [" + + config.getPivotConfig().getMaxPageSearchSize() + "] must be greater than 10 and less than 10,000", + validationException); + } for(String failure : config.getPivotConfig().aggFieldValidation()) { validationException = addValidationError(failure, validationException); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java index 79a0a7fc1bfa8..ab2f7d489ac9a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -35,6 +36,7 @@ public class PivotConfig implements Writeable, ToXContentObject { private static final String NAME = "data_frame_transform_pivot"; private final GroupConfig groups; private final AggregationConfig aggregationConfig; + private final Integer maxPageSearchSize; private static final ConstructingObjectParser STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); @@ -61,7 +63,7 @@ private static ConstructingObjectParser createParser(boolean throw new IllegalArgumentException("Required [aggregations]"); } - return new 
PivotConfig(groups, aggregationConfig); + return new PivotConfig(groups, aggregationConfig, (Integer)args[3]); }); parser.declareObject(constructorArg(), @@ -69,18 +71,21 @@ private static ConstructingObjectParser createParser(boolean parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGREGATIONS); parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGS); + parser.declareInt(optionalConstructorArg(), DataFrameField.MAX_PAGE_SEARCH_SIZE); return parser; } - public PivotConfig(final GroupConfig groups, final AggregationConfig aggregationConfig) { + public PivotConfig(final GroupConfig groups, final AggregationConfig aggregationConfig, Integer maxPageSearchSize) { this.groups = ExceptionsHelper.requireNonNull(groups, DataFrameField.GROUP_BY.getPreferredName()); this.aggregationConfig = ExceptionsHelper.requireNonNull(aggregationConfig, DataFrameField.AGGREGATIONS.getPreferredName()); + this.maxPageSearchSize = maxPageSearchSize; } public PivotConfig(StreamInput in) throws IOException { this.groups = new GroupConfig(in); this.aggregationConfig = new AggregationConfig(in); + this.maxPageSearchSize = in.readOptionalInt(); } @Override @@ -88,6 +93,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(DataFrameField.GROUP_BY.getPreferredName(), groups); builder.field(DataFrameField.AGGREGATIONS.getPreferredName(), aggregationConfig); + if (maxPageSearchSize != null) { + builder.field(DataFrameField.MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize); + } builder.endObject(); return builder; } @@ -113,6 +121,7 @@ public void toCompositeAggXContent(XContentBuilder builder, Params params) throw public void writeTo(StreamOutput out) throws IOException { groups.writeTo(out); aggregationConfig.writeTo(out); + out.writeOptionalInt(maxPageSearchSize); } public AggregationConfig getAggregationConfig() { @@ -123,6 +132,11 @@ public GroupConfig getGroupConfig() { return groups; } + @Nullable + public Integer getMaxPageSearchSize() { + return maxPageSearchSize; + } + @Override public boolean equals(Object other) { if (this == other) { @@ -135,12 +149,14 @@ public boolean equals(Object other) { final PivotConfig that = (PivotConfig) other; - return Objects.equals(this.groups, that.groups) && Objects.equals(this.aggregationConfig, that.aggregationConfig); + return Objects.equals(this.groups, that.groups) + && Objects.equals(this.aggregationConfig, that.aggregationConfig) + && Objects.equals(this.maxPageSearchSize, that.maxPageSearchSize); } @Override public int hashCode() { - return Objects.hash(groups, aggregationConfig); + return Objects.hash(groups, aggregationConfig, maxPageSearchSize); } public boolean isValid() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java index 342e007f21284..2f93f50d4d136 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java @@ -24,11 +24,15 @@ public class PivotConfigTests extends AbstractSerializingDataFrameTestCase { public static PivotConfig randomPivotConfig() { - return new 
PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), + AggregationConfigTests.randomAggregationConfig(), + randomBoolean() ? null : randomIntBetween(10, 10_000)); } public static PivotConfig randomInvalidPivotConfig() { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomInvalidAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), + AggregationConfigTests.randomInvalidAggregationConfig(), + randomBoolean() ? null : randomIntBetween(10, 10_000)); } @Override diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index ba6a6137789a3..3a6ab2e5b71d2 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -172,7 +172,13 @@ protected AggregationConfig createAggConfig(AggregatorFactories.Builder aggregat protected PivotConfig createPivotConfig(Map groups, AggregatorFactories.Builder aggregations) throws Exception { - return new PivotConfig(createGroupConfig(groups), createAggConfig(aggregations)); + return createPivotConfig(groups, aggregations, null); + } + + protected PivotConfig createPivotConfig(Map groups, + AggregatorFactories.Builder aggregations, + Integer size) throws Exception { + return new PivotConfig(createGroupConfig(groups), createAggConfig(aggregations), size); } protected DataFrameTransformConfig createTransformConfig(String id, diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index d338d6949f07b..194d35e8ba636 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -130,7 +130,7 @@ public void testGetProgress() throws Exception { AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); aggs.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); AggregationConfig aggregationConfig = new AggregationConfig(Collections.emptyMap(), aggs); - PivotConfig pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig); + PivotConfig pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); DataFrameTransformConfig config = new DataFrameTransformConfig("get_progress_transform", sourceConfig, destConfig, @@ -149,7 +149,7 @@ public void testGetProgress() throws Exception { QueryConfig queryConfig = new QueryConfig(Collections.emptyMap(), QueryBuilders.termQuery("user_id", "user_26")); - pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig); + pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); sourceConfig = new SourceConfig(new String[]{REVIEWS_INDEX_NAME}, queryConfig); config = new 
DataFrameTransformConfig("get_progress_transform", sourceConfig, diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java index 0e5231442d18b..8205f2576da68 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java @@ -76,13 +76,15 @@ public void deduceMappings(Client client, SourceConfig sourceConfig, final Actio * the page size, the type of aggregations and the data. As the page size is the number of buckets we return * per page the page size is a multiplier for the costs of aggregating bucket. * - * Initially this returns a default, in future it might inspect the configuration and base the initial size - * on the aggregations used. + * The user may set a maximum in the {@link PivotConfig#getMaxPageSearchSize()}, but if that is not provided, + * the default {@link Pivot#DEFAULT_INITIAL_PAGE_SIZE} is used. + * + * In future we might inspect the configuration and base the initial size on the aggregations used. * * @return the page size */ public int getInitialPageSize() { - return DEFAULT_INITIAL_PAGE_SIZE; + return config.getMaxPageSearchSize() == null ? DEFAULT_INITIAL_PAGE_SIZE : config.getMaxPageSearchSize(); } public SearchRequest buildSearchRequest(SourceConfig sourceConfig, Map position, int pageSize) { diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java index f3f3255f07a6d..43198c6edfcf3 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; @@ -39,7 +41,10 @@ import java.util.function.Consumer; import java.util.function.Function; +import static org.elasticsearch.xpack.core.dataframe.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -169,9 +174,15 @@ public void setUpMocks() { } public void testPageSizeAdapt() throws InterruptedException { - DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfig(); + Integer pageSize = randomBoolean() 
? null : randomIntBetween(500, 10_000); + DataFrameTransformConfig config = new DataFrameTransformConfig(randomAlphaOfLength(10), + randomSourceConfig(), + randomDestConfig(), + null, + new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig(), pageSize), + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - + final long initialPageSize = pageSize == null ? Pivot.DEFAULT_INITIAL_PAGE_SIZE : pageSize; Function searchFunction = searchRequest -> { throw new SearchPhaseExecutionException("query", "Partial shards failure", new ShardSearchFailure[] { new ShardSearchFailure(new CircuitBreakingException("to much memory", 110, 100, Durability.TRANSIENT)) }); @@ -179,9 +190,7 @@ public void testPageSizeAdapt() throws InterruptedException { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); - Consumer failureConsumer = e -> { - fail("expected circuit breaker exception to be handled"); - }; + Consumer failureConsumer = e -> fail("expected circuit breaker exception to be handled"); final ExecutorService executor = Executors.newFixedThreadPool(1); try { @@ -197,8 +206,8 @@ public void testPageSizeAdapt() throws InterruptedException { latch.countDown(); awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); long pageSizeAfterFirstReduction = indexer.getPageSize(); - assertTrue(Pivot.DEFAULT_INITIAL_PAGE_SIZE > pageSizeAfterFirstReduction); - assertTrue(pageSizeAfterFirstReduction > DataFrameIndexer.MINIMUM_PAGE_SIZE); + assertThat(initialPageSize, greaterThan(pageSizeAfterFirstReduction)); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)DataFrameIndexer.MINIMUM_PAGE_SIZE)); // run indexer a 2nd time final CountDownLatch secondRunLatch = indexer.newLatch(1); @@ -211,8 +220,8 @@ public void testPageSizeAdapt() throws InterruptedException { awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); // assert that page size has been reduced again - assertTrue(pageSizeAfterFirstReduction > indexer.getPageSize()); - assertTrue(pageSizeAfterFirstReduction > DataFrameIndexer.MINIMUM_PAGE_SIZE); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)indexer.getPageSize())); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)DataFrameIndexer.MINIMUM_PAGE_SIZE)); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index 4c434cdbee7fd..20ea84502ed82 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -107,6 +107,16 @@ public void testValidateNonExistingIndex() throws Exception { assertInvalidTransform(client, source, pivot); } + public void testInitialPageSize() throws Exception { + int expectedPageSize = 1000; + + Pivot pivot = new Pivot(new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), expectedPageSize)); + assertThat(pivot.getInitialPageSize(), equalTo(expectedPageSize)); + + pivot = new Pivot(new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), null)); + assertThat(pivot.getInitialPageSize(), equalTo(Pivot.DEFAULT_INITIAL_PAGE_SIZE)); + } + public void testSearchFailure() 
throws Exception { // test a failure during the search operation, transform creation fails if // search has failures although they might just be temporary @@ -177,11 +187,11 @@ protected void } private PivotConfig getValidPivotConfig() throws IOException { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig()); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), getValidAggregationConfig(), null); } private PivotConfig getValidPivotConfig(AggregationConfig aggregationConfig) throws IOException { - return new PivotConfig(GroupConfigTests.randomGroupConfig(), aggregationConfig); + return new PivotConfig(GroupConfigTests.randomGroupConfig(), aggregationConfig, null); } private AggregationConfig getValidAggregationConfig() throws IOException { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index 40af091a91bd9..65945b6ab7429 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -303,6 +303,36 @@ setup: } } --- +"Test put config with invalid pivot size": + - do: + catch: /pivot\.max_page_search_size \[5\] must be greater than 10 and less than 10,000/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-dest-index" }, + "pivot": { + "max_page_search_size": 5, + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - do: + catch: /pivot\.max_page_search_size \[15000\] must be greater than 10 and less than 10,000/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-dest-index" }, + "pivot": { + "max_page_search_size": 15000, + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } +--- "Test creation failures due to duplicate and conflicting field names": - do: catch: /duplicate field \[airline\] detected/ From 7953f9ac71850c8483f5b6b9b764f55ddf6d4119 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Fri, 10 May 2019 08:43:35 -0600 Subject: [PATCH 076/321] Remove close method in PageCacheRecycler/Recycler (#41917) The changes in #39317 brought to light some concurrency issues in the close method of Recyclers as we do not wait for threads running in the threadpool to be finished prior to the closing of the PageCacheRecycler and the Recyclers that are used internally. #41695 was opened to address the concurrent close issues but upon review, the closing of these classes is not really needed as the instances should become available for garbage collection once there is no longer a reference to the closed node.
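For illustration, a minimal self-contained sketch of the lifecycle hazard described above. It uses hypothetical stand-in classes (ToyRecycler, CloseRaceDemo), not the actual Elasticsearch recyclers: close() returns without waiting for worker threads, so a page can still be recycled into a pool that has already been torn down.

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Hypothetical stand-in for a deque-backed recycler; not the Elasticsearch class.
    class ToyRecycler {
        private final Deque<byte[]> deque = new ArrayDeque<>();

        synchronized byte[] obtain() {
            byte[] page = deque.pollFirst();
            return page != null ? page : new byte[4096];
        }

        synchronized void recycle(byte[] page) {
            // If this runs after close(), the "closed" pool is quietly repopulated.
            deque.addFirst(page);
        }

        synchronized void close() {
            // Clears the cache, but nothing waits for worker threads that are
            // still between obtain() and recycle().
            deque.clear();
        }
    }

    class CloseRaceDemo {
        public static void main(String[] args) throws InterruptedException {
            ToyRecycler recycler = new ToyRecycler();
            Thread worker = new Thread(() -> recycler.recycle(recycler.obtain()));
            worker.start();
            recycler.close(); // may interleave with the worker's recycle() call
            worker.join();
            // Because the cached pages hold no external resources, dropping the
            // last reference and letting the garbage collector reclaim everything
            // is simpler than ordering close() against every worker, which is the
            // rationale for removing close() in this patch.
        }
    }
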
Closes #41683 --- .../client/transport/TransportClient.java | 2 -- .../common/recycler/AbstractRecycler.java | 5 ----- .../common/recycler/ConcurrentDequeRecycler.java | 7 ------- .../elasticsearch/common/recycler/DequeRecycler.java | 10 ---------- .../elasticsearch/common/recycler/FilterRecycler.java | 5 ----- .../elasticsearch/common/recycler/NoneRecycler.java | 5 ----- .../org/elasticsearch/common/recycler/Recycler.java | 4 +--- .../org/elasticsearch/common/recycler/Recyclers.java | 7 ------- .../elasticsearch/common/util/PageCacheRecycler.java | 9 +-------- server/src/main/java/org/elasticsearch/node/Node.java | 3 --- .../common/recycler/AbstractRecyclerTestCase.java | 10 ---------- 11 files changed, 2 insertions(+), 65 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index b5720c023f095..4c2f4932de2f2 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -184,7 +184,6 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings resourcesToClose.add(circuitBreakerService); PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.REQUEST); - resourcesToClose.add(pageCacheRecycler); modules.add(settingsModule); NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool, bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null); @@ -376,7 +375,6 @@ public void close() { closeables.add(plugin); } closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS)); - closeables.add(injector.getInstance(PageCacheRecycler.class)); IOUtils.closeWhileHandlingException(closeables); } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java index 05fa525972689..546d801d70b47 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/AbstractRecycler.java @@ -28,9 +28,4 @@ protected AbstractRecycler(Recycler.C c) { this.c = c; } - @Override - public void close() { - // no-op by default - } - } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java index 04103c5e274d9..54374cc3bdebe 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/ConcurrentDequeRecycler.java @@ -37,13 +37,6 @@ public ConcurrentDequeRecycler(C c, int maxSize) { this.size = new AtomicInteger(); } - @Override - public void close() { - assert deque.size() == size.get(); - super.close(); - size.set(0); - } - @Override public V obtain() { final V v = super.obtain(); diff --git a/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java index a40befe9d8191..0f201133eceea 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java +++ 
b/server/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java @@ -36,16 +36,6 @@ public DequeRecycler(C c, Deque queue, int maxSize) { this.maxSize = maxSize; } - @Override - public void close() { - // call destroy() for every cached object - for (T t : deque) { - c.destroy(t); - } - // finally get rid of all references - deque.clear(); - } - @Override public V obtain() { final T v = deque.pollFirst(); diff --git a/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java index 426185173e581..5011402f6d97a 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/FilterRecycler.java @@ -34,9 +34,4 @@ public Recycler.V obtain() { return wrap(getDelegate().obtain()); } - @Override - public void close() { - getDelegate().close(); - } - } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java index 865182b88e104..102f1d424305a 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java @@ -31,11 +31,6 @@ public V obtain() { return new NV<>(c.newInstance()); } - @Override - public void close() { - // no-op - } - public static class NV implements Recycler.V { T value; diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java b/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java index 161e6463423f3..95a67fdf8e015 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recycler.java @@ -25,7 +25,7 @@ * A recycled object, note, implementations should support calling obtain and then recycle * on different threads. 
*/ -public interface Recycler extends Releasable { +public interface Recycler { interface Factory { Recycler build(); @@ -53,8 +53,6 @@ interface V extends Releasable { } - void close(); - V obtain(); } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index 3ea9d17c25f19..5bfd3448e2336 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -145,13 +145,6 @@ protected Recycler getDelegate() { return recyclers[slot()]; } - @Override - public void close() { - for (Recycler recycler : recyclers) { - recycler.close(); - } - } - }; } diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 4ca408e044170..40b9a8c7e9468 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -20,8 +20,6 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.recycler.AbstractRecyclerC; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; @@ -39,7 +37,7 @@ import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. */ -public class PageCacheRecycler implements Releasable { +public class PageCacheRecycler { public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); @@ -73,11 +71,6 @@ public class PageCacheRecycler implements Releasable { NON_RECYCLING_INSTANCE = new PageCacheRecycler(Settings.builder().put(LIMIT_HEAP_SETTING.getKey(), "0%").build()); } - @Override - public void close() { - Releasables.close(true, bytePage, intPage, longPage, objectPage); - } - public PageCacheRecycler(Settings settings) { final Type type = TYPE_SETTING.get(settings); final long limit = LIMIT_HEAP_SETTING.get(settings).getBytes(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index bd7dad26b0c0e..4b77c06447836 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -378,7 +378,6 @@ protected Node( PageCacheRecycler pageCacheRecycler = createPageCacheRecycler(settings); BigArrays bigArrays = createBigArrays(pageCacheRecycler, circuitBreakerService); - resourcesToClose.add(pageCacheRecycler); modules.add(settingsModule); List namedWriteables = Stream.of( NetworkModule.getNamedWriteables().stream(), @@ -844,8 +843,6 @@ public synchronized void close() throws IOException { toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); - toClose.add(() -> stopWatch.stop().start("page_cache_recycler")); - toClose.add(injector.getInstance(PageCacheRecycler.class)); toClose.add(stopWatch::stop); if (logger.isTraceEnabled()) { diff --git a/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java b/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java index be7799fcd6c67..d2d12b32da4e1 100644 --- 
a/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTestCase.java @@ -99,7 +99,6 @@ public void testReuse() { assertNotSame(b1, b2); } o.close(); - r.close(); } public void testRecycle() { @@ -111,7 +110,6 @@ public void testRecycle() { o = r.obtain(); assertRecycled(o.v()); o.close(); - r.close(); } public void testDoubleRelease() { @@ -128,7 +126,6 @@ public void testDoubleRelease() { final Recycler.V v2 = r.obtain(); final Recycler.V v3 = r.obtain(); assertNotSame(v2.v(), v3.v()); - r.close(); } public void testDestroyWhenOverCapacity() { @@ -152,9 +149,6 @@ public void testDestroyWhenOverCapacity() { // release first ref, verify for destruction o.close(); assertDead(data); - - // close the rest - r.close(); } public void testClose() { @@ -171,10 +165,6 @@ public void testClose() { // verify that recycle() ran assertRecycled(data); - - // closing the recycler should mark recycled instances via destroy() - r.close(); - assertDead(data); } } From 71dd6e5980f7e59cbec5a697a95db45aa647d679 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 10 May 2019 16:28:28 +0100 Subject: [PATCH 077/321] Mute failing AsyncTwoPhaseIndexerTests See https://github.com/elastic/elasticsearch/issues/42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index e56491bdb5764..27fba82338a1c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -222,6 +222,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -264,6 +265,7 @@ public void testStateMachineBrokenSearch() throws InterruptedException { } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_AfterIndexerIsFinished() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -283,6 +285,7 @@ public void testStop_AfterIndexerIsFinished() throws InterruptedException { } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_WhileIndexing() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 31a7df86207dfc854c4b6fdba9edf3130b2d64df Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 10 May 2019 11:47:05 -0400 Subject: [PATCH 078/321] Remove reference to fs.data.spins in docs We long ago removed fs.data.spins from the nodes stats. This commit removes reference to this in the docs. 
--- docs/reference/cluster/nodes-stats.asciidoc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 4bd3c2c9647a5..bb24dffd40f7d 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -125,11 +125,6 @@ information that concern the file system: `fs.data.available_in_bytes`:: Total number of bytes available to this Java virtual machine on this file store -`fs.data.spins` (Linux only):: - Indicates if the file store is backed by spinning storage. - `null` means we could not determine it, `true` means the device possibly spins - and `false` means it does not (ex: solid-state disks). - `fs.io_stats.devices` (Linux only):: Array of disk metrics for each device that is backing an Elasticsearch data path. These disk metrics are probed periodically From eb42fa836899381ae4e0e77c00099b91ce1c0d81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 10 May 2019 18:07:05 +0200 Subject: [PATCH 079/321] Change IndexAnalyzers default analyzer access (#42011) Currently IndexAnalyzers keeps the three default as separate class members although they should refer to the same analyzers held in the additional analyzers map under the default names. This assumption should be made more explicit by keeping all analyzers in the map. This change adapts the constructor to check all the default entries are there and the getters to reach into the map with the default names when needed. --- .../metadata/MetaDataIndexUpgradeService.java | 2 +- .../index/analysis/AnalysisRegistry.java | 29 +++---- .../index/analysis/IndexAnalyzers.java | 29 +++---- .../index/analysis/AnalysisRegistryTests.java | 4 +- .../index/analysis/IndexAnalyzersTests.java | 87 +++++++++++++++++++ .../index/mapper/TypeParsersTests.java | 41 +++++---- .../index/engine/TranslogHandler.java | 7 +- 7 files changed, 144 insertions(+), 55 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index d3520da670289..483835f633e7e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -195,7 +195,7 @@ public Set> entrySet() { } }; try (IndexAnalyzers fakeIndexAnalzyers = - new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) { + new IndexAnalyzers(indexSettings, analyzerMap, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 483a1b4a7e563..c4be6edd49069 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -51,6 +51,11 @@ public final class AnalysisRegistry implements Closeable { public static final String INDEX_ANALYSIS_CHAR_FILTER = 
"index.analysis.char_filter"; public static final String INDEX_ANALYSIS_FILTER = "index.analysis.filter"; public static final String INDEX_ANALYSIS_TOKENIZER = "index.analysis.tokenizer"; + + public static final String DEFAULT_ANALYZER_NAME = "default"; + public static final String DEFAULT_SEARCH_ANALYZER_NAME = "default_search"; + public static final String DEFAULT_SEARCH_QUOTED_ANALYZER_NAME = "default_search_quoted"; + private final PrebuiltAnalysis prebuiltAnalysis; private final Map cachedAnalyzer = new ConcurrentHashMap<>(); @@ -439,37 +444,29 @@ public IndexAnalyzers build(IndexSettings indexSettings, "whitespace", () -> new WhitespaceTokenizer(), tokenFilterFactoryFactories, charFilterFactoryFactories); } - if (!analyzers.containsKey("default")) { - NamedAnalyzer defaultAnalyzer = produceAnalyzer("default", new StandardAnalyzerProvider(indexSettings, null, "default", - Settings.Builder.EMPTY_SETTINGS), tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); - analyzers.put("default", defaultAnalyzer); - } - if (!analyzers.containsKey("default_search")) { - analyzers.put("default_search", analyzers.get("default")); + if (!analyzers.containsKey(DEFAULT_ANALYZER_NAME)) { + analyzers.put(DEFAULT_ANALYZER_NAME, + produceAnalyzer(DEFAULT_ANALYZER_NAME, + new StandardAnalyzerProvider(indexSettings, null, DEFAULT_ANALYZER_NAME, Settings.Builder.EMPTY_SETTINGS), + tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories)); } - if (!analyzers.containsKey("default_search_quoted")) { - analyzers.put("default_search_quoted", analyzers.get("default_search")); - } - - NamedAnalyzer defaultAnalyzer = analyzers.get("default"); + NamedAnalyzer defaultAnalyzer = analyzers.get(DEFAULT_ANALYZER_NAME); if (defaultAnalyzer == null) { throw new IllegalArgumentException("no default analyzer configured"); } defaultAnalyzer.checkAllowedInMode(AnalysisMode.ALL); + if (analyzers.containsKey("default_index")) { throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use " + "[index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); } - NamedAnalyzer defaultSearchAnalyzer = analyzers.getOrDefault("default_search", defaultAnalyzer); - NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.getOrDefault("default_search_quote", defaultSearchAnalyzer); for (Map.Entry analyzer : analyzers.entrySet()) { if (analyzer.getKey().startsWith("_")) { throw new IllegalArgumentException("analyzer name must not start with '_'. 
got \"" + analyzer.getKey() + "\""); } } - return new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer, analyzers, normalizers, - whitespaceNormalizers); + return new IndexAnalyzers(indexSettings, analyzers, normalizers, whitespaceNormalizers); } private static NamedAnalyzer produceAnalyzer(String name, AnalyzerProvider analyzerFactory, diff --git a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java index 4cb0b9aa324c9..4f1cbeb4022ac 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java @@ -25,9 +25,13 @@ import java.io.Closeable; import java.io.IOException; import java.util.Map; +import java.util.Objects; import java.util.stream.Stream; import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME; /** * IndexAnalyzers contains a name to analyzer mapping for a specific index. @@ -37,23 +41,18 @@ * @see AnalysisRegistry */ public final class IndexAnalyzers extends AbstractIndexComponent implements Closeable { - private final NamedAnalyzer defaultIndexAnalyzer; - private final NamedAnalyzer defaultSearchAnalyzer; - private final NamedAnalyzer defaultSearchQuoteAnalyzer; private final Map analyzers; private final Map normalizers; private final Map whitespaceNormalizers; - public IndexAnalyzers(IndexSettings indexSettings, NamedAnalyzer defaultIndexAnalyzer, NamedAnalyzer defaultSearchAnalyzer, - NamedAnalyzer defaultSearchQuoteAnalyzer, Map analyzers, - Map normalizers, Map whitespaceNormalizers) { + public IndexAnalyzers(IndexSettings indexSettings, Map analyzers, Map normalizers, + Map whitespaceNormalizers) { super(indexSettings); - if (defaultIndexAnalyzer.name().equals("default") == false) { - throw new IllegalStateException("default analyzer must have the name [default] but was: [" + defaultIndexAnalyzer.name() + "]"); + Objects.requireNonNull(analyzers.get(DEFAULT_ANALYZER_NAME), "the default analyzer must be set"); + if (analyzers.get(DEFAULT_ANALYZER_NAME).name().equals(DEFAULT_ANALYZER_NAME) == false) { + throw new IllegalStateException( + "default analyzer must have the name [default] but was: [" + analyzers.get(DEFAULT_ANALYZER_NAME).name() + "]"); } - this.defaultIndexAnalyzer = defaultIndexAnalyzer; - this.defaultSearchAnalyzer = defaultSearchAnalyzer; - this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer; this.analyzers = unmodifiableMap(analyzers); this.normalizers = unmodifiableMap(normalizers); this.whitespaceNormalizers = unmodifiableMap(whitespaceNormalizers); @@ -84,21 +83,21 @@ public NamedAnalyzer getWhitespaceNormalizer(String name) { * Returns the default index analyzer for this index */ public NamedAnalyzer getDefaultIndexAnalyzer() { - return defaultIndexAnalyzer; + return analyzers.get(DEFAULT_ANALYZER_NAME); } /** - * Returns the default search analyzer for this index + * Returns the default search analyzer for this index. 
If not set, this will return the default analyzer */ public NamedAnalyzer getDefaultSearchAnalyzer() { - return defaultSearchAnalyzer; + return analyzers.getOrDefault(DEFAULT_SEARCH_ANALYZER_NAME, getDefaultIndexAnalyzer()); } /** * Returns the default search quote analyzer for this index */ public NamedAnalyzer getDefaultSearchQuoteAnalyzer() { - return defaultSearchQuoteAnalyzer; + return analyzers.getOrDefault(DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, getDefaultSearchAnalyzer()); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index d75b359863137..f7437ad8ec5a9 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -123,9 +123,9 @@ public TokenStream create(TokenStream tokenStream) { Analyzer analyzer = new CustomAnalyzer("tokenizerName", null, new CharFilterFactory[0], new TokenFilterFactory[] { tokenFilter }); MapperException ex = expectThrows(MapperException.class, () -> emptyRegistry.build(IndexSettingsModule.newIndexSettings("index", settings), - singletonMap("default", new PreBuiltAnalyzerProvider("my_analyzer", AnalyzerScope.INDEX, analyzer)), emptyMap(), + singletonMap("default", new PreBuiltAnalyzerProvider("default", AnalyzerScope.INDEX, analyzer)), emptyMap(), emptyMap(), emptyMap(), emptyMap())); - assertEquals("analyzer [my_analyzer] contains filters [my_filter] that are not allowed to run in all mode.", ex.getMessage()); + assertEquals("analyzer [default] contains filters [my_filter] that are not allowed to run in all mode.", ex.getMessage()); } public void testOverrideDefaultIndexAnalyzerIsUnsupported() { diff --git a/server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java b/server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java new file mode 100644 index 0000000000000..f5808c4b0da5c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/analysis/IndexAnalyzersTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class IndexAnalyzersTests extends ESTestCase { + + /** + * test the checks in the constructor + */ + public void testAnalyzerMapChecks() { + Map analyzers = new HashMap<>(); + { + NullPointerException ex = expectThrows(NullPointerException.class, + () -> new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())); + assertEquals("the default analyzer must be set", ex.getMessage()); + } + { + analyzers.put(AnalysisRegistry.DEFAULT_ANALYZER_NAME, + new NamedAnalyzer("otherName", AnalyzerScope.INDEX, new StandardAnalyzer())); + IllegalStateException ex = expectThrows(IllegalStateException.class, + () -> new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())); + assertEquals("default analyzer must have the name [default] but was: [otherName]", ex.getMessage()); + } + } + + public void testAnalyzerDefaults() throws IOException { + Map analyzers = new HashMap<>(); + NamedAnalyzer analyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); + analyzers.put(AnalysisRegistry.DEFAULT_ANALYZER_NAME, analyzer); + + // if only "default" is set in the map, all getters should return the same analyzer + try (IndexAnalyzers indexAnalyzers = new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())) { + assertSame(analyzer, indexAnalyzers.getDefaultIndexAnalyzer()); + assertSame(analyzer, indexAnalyzers.getDefaultSearchAnalyzer()); + assertSame(analyzer, indexAnalyzers.getDefaultSearchQuoteAnalyzer()); + } + + analyzers.put(AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME, + new NamedAnalyzer("my_search_analyzer", AnalyzerScope.INDEX, new StandardAnalyzer())); + try (IndexAnalyzers indexAnalyzers = new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())) { + assertSame(analyzer, indexAnalyzers.getDefaultIndexAnalyzer()); + assertEquals("my_search_analyzer", indexAnalyzers.getDefaultSearchAnalyzer().name()); + assertEquals("my_search_analyzer", indexAnalyzers.getDefaultSearchQuoteAnalyzer().name()); + } + + analyzers.put(AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, + new NamedAnalyzer("my_search_quote_analyzer", AnalyzerScope.INDEX, new StandardAnalyzer())); + try (IndexAnalyzers indexAnalyzers = new IndexAnalyzers(IndexSettingsModule.newIndexSettings("index", Settings.EMPTY), analyzers, + Collections.emptyMap(), Collections.emptyMap())) { + assertSame(analyzer, indexAnalyzers.getDefaultIndexAnalyzer()); + assertEquals("my_search_analyzer", indexAnalyzers.getDefaultSearchAnalyzer().name()); + assertEquals("my_search_quote_analyzer", indexAnalyzers.getDefaultSearchQuoteAnalyzer().name()); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 7e216c37686ee..bc59c59aa54ab 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -40,6 +40,9 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; +import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -57,22 +60,20 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class); // check AnalysisMode.ALL works - Map analyzers = new HashMap<>(); + Map analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", AnalysisMode.ALL))); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, - null, analyzers, Collections.emptyMap(), Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); // check that "analyzer" set to something that only supports AnalysisMode.SEARCH_TIME or AnalysisMode.INDEX_TIME is blocked AnalysisMode mode = randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME); - analyzers = new HashMap<>(); + analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); - indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, null, analyzers, - Collections.emptyMap(), Collections.emptyMap()); + indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); MapperException ex = expectThrows(MapperException.class, () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext)); @@ -80,6 +81,14 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { ex.getMessage()); } + private static Map defaultAnalyzers() { + Map analyzers = new HashMap<>(); + analyzers.put(DEFAULT_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null)); + analyzers.put(DEFAULT_SEARCH_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null)); + analyzers.put(DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null)); + return analyzers; + } + public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField"); for (String settingToTest : new String[] { "search_analyzer", "search_quote_analyzer" }) { @@ -92,25 +101,23 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class); // check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works - Map analyzers = new HashMap<>(); + Map analyzers = defaultAnalyzers(); AnalysisMode mode = randomFrom(AnalysisMode.ALL, AnalysisMode.SEARCH_TIME); 
analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer())); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, - null, analyzers, Collections.emptyMap(), Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked mode = AnalysisMode.INDEX_TIME; - analyzers = new HashMap<>(); + analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer())); - indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, null, - analyzers, Collections.emptyMap(), Collections.emptyMap()); + indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); MapperException ex = expectThrows(MapperException.class, () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext)); @@ -127,11 +134,10 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer AnalysisMode mode = AnalysisMode.INDEX_TIME; - Map analyzers = new HashMap<>(); + Map analyzers = defaultAnalyzers(); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, - null, analyzers, Collections.emptyMap(), Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); MapperException ex = expectThrows(MapperException.class, () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext)); @@ -140,14 +146,13 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { // check AnalysisMode.INDEX_TIME is okay if search analyzer is also set fieldNode.put("search_analyzer", "standard"); - analyzers = new HashMap<>(); + analyzers = defaultAnalyzers(); mode = randomFrom(AnalysisMode.ALL, AnalysisMode.INDEX_TIME); analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode))); analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer())); - indexAnalyzers = new IndexAnalyzers(indexSettings, new NamedAnalyzer("default", AnalyzerScope.INDEX, null), null, null, analyzers, - Collections.emptyMap(), Collections.emptyMap()); + indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, Collections.emptyMap(), Collections.emptyMap()); 
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 7090419e9f23d..ced2a7bff78d7 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -60,9 +61,9 @@ long appliedOperations() { } public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) { - NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); - IndexAnalyzers indexAnalyzers = - new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, emptyMap(), emptyMap(), emptyMap()); + Map analyzers = new HashMap<>(); + analyzers.put(AnalysisRegistry.DEFAULT_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())); + IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, analyzers, emptyMap(), emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry(); mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry, From 74a743829799b64971e0ac5ae265f43f6c14e074 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Fri, 10 May 2019 19:38:11 +0300 Subject: [PATCH 080/321] Prevent order being lost for _nodes API filters (#42045) * Switch to using a list instead of a Set for the filters, so that the order of these filters is kept. 
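The fix turns on a small data-structure distinction: tokenizing the comma-separated parameter into a Set both deduplicates entries and forgets the order the user wrote them in, while splitting into an array keeps both. What follows is a minimal, self-contained JDK sketch of that difference, not code from this patch; the sample input mirrors the testDuplicatedFiltersAreNotRemoved case added below.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class NodeFilterOrderDemo {
    public static void main(String[] args) {
        String nodeId = "_all,master:false,_all";

        // Old behavior: tokenize into a Set -- the duplicate "_all" collapses
        // and iteration order is whatever the hash table produces
        Set<String> asSet = new HashSet<>(Arrays.asList(nodeId.split(",")));
        System.out.println(asSet.size());              // 2

        // New behavior: split into an array -- duplicates and user order survive
        String[] asArray = nodeId.split(",");
        System.out.println(Arrays.toString(asArray));  // [_all, master:false, _all]
    }
}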
--- .../admin/cluster/RestNodesInfoAction.java | 21 ++- .../cluster/RestNodesInfoActionTests.java | 143 ++++++++++++++++++ 2 files changed, 156 insertions(+), 8 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java index bacc698b2a4f7..20370b27d4373 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoAction.java @@ -36,7 +36,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestNodesInfoAction extends BaseRestHandler { - private static final Set ALLOWED_METRICS = Sets.newHashSet( + static final Set ALLOWED_METRICS = Sets.newHashSet( "http", "ingest", "indices", @@ -69,6 +69,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final NodesInfoRequest nodesInfoRequest = prepareRequest(request); + settingsFilter.addFilterSettingParams(request); + + return channel -> client.admin().cluster().nodesInfo(nodesInfoRequest, new NodesResponseRestListener<>(channel)); + } + + static NodesInfoRequest prepareRequest(final RestRequest request) { String[] nodeIds; Set metrics; @@ -76,17 +83,18 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC // still, /_nodes/_local (or any other node id) should work and be treated as usual // this means one must differentiate between allowed metrics and arbitrary node ids in the same place if (request.hasParam("nodeId") && !request.hasParam("metrics")) { - Set metricsOrNodeIds = Strings.tokenizeByCommaToSet(request.param("nodeId", "_all")); + String nodeId = request.param("nodeId", "_all"); + Set metricsOrNodeIds = Strings.tokenizeByCommaToSet(nodeId); boolean isMetricsOnly = ALLOWED_METRICS.containsAll(metricsOrNodeIds); if (isMetricsOnly) { nodeIds = new String[]{"_all"}; metrics = metricsOrNodeIds; } else { - nodeIds = metricsOrNodeIds.toArray(new String[]{}); + nodeIds = Strings.tokenizeToStringArray(nodeId, ","); metrics = Sets.newHashSet("_all"); } } else { - nodeIds = Strings.splitStringByCommaToArray(request.param("nodeId", "_all")); + nodeIds = Strings.tokenizeToStringArray(request.param("nodeId", "_all"), ","); metrics = Strings.tokenizeByCommaToSet(request.param("metrics", "_all")); } @@ -108,10 +116,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC nodesInfoRequest.ingest(metrics.contains("ingest")); nodesInfoRequest.indices(metrics.contains("indices")); } - - settingsFilter.addFilterSettingParams(request); - - return channel -> client.admin().cluster().nodesInfo(nodesInfoRequest, new NodesResponseRestListener<>(channel)); + return nodesInfoRequest; } @Override diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java new file mode 100644 index 0000000000000..d757ee095cdc1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesInfoActionTests.java @@ -0,0 +1,143 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.rest.action.admin.cluster.RestNodesInfoAction.ALLOWED_METRICS; + +public class RestNodesInfoActionTests extends ESTestCase { + + public void testDuplicatedFiltersAreNotRemoved() { + Map params = new HashMap<>(); + params.put("nodeId", "_all,master:false,_all"); + + RestRequest restRequest = buildRestRequest(params); + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(new String[] { "_all", "master:false", "_all" }, actual.nodesIds()); + } + + public void testOnlyMetrics() { + Map params = new HashMap<>(); + int metricsCount = randomIntBetween(1, ALLOWED_METRICS.size()); + List metrics = new ArrayList<>(); + + for(int i = 0; i < metricsCount; i++) { + metrics.add(randomFrom(ALLOWED_METRICS)); + } + params.put("nodeId", String.join(",", metrics)); + + RestRequest restRequest = buildRestRequest(params); + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(new String[] { "_all" }, actual.nodesIds()); + assertMetrics(metrics, actual); + } + + public void testAllMetricsSelectedWhenNodeAndMetricSpecified() { + Map params = new HashMap<>(); + String nodeId = randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23)); + String metric = randomFrom(ALLOWED_METRICS); + + params.put("nodeId", nodeId + "," + metric); + RestRequest restRequest = buildRestRequest(params); + + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(new String[] { nodeId, metric }, actual.nodesIds()); + assertAllMetricsTrue(actual); + } + + public void testSeparateNodeIdsAndMetrics() { + Map params = new HashMap<>(); + List nodeIds = new ArrayList<>(5); + List metrics = new ArrayList<>(5); + + for(int i = 0; i < 5; i++) { + nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23))); + metrics.add(randomFrom(ALLOWED_METRICS)); + } + + params.put("nodeId", String.join(",", nodeIds)); + params.put("metrics", String.join(",", metrics)); + RestRequest restRequest = buildRestRequest(params); + + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(nodeIds.toArray(), actual.nodesIds()); + assertMetrics(metrics, actual); + } + + public void testExplicitAllMetrics() { + Map params = new HashMap<>(); + List nodeIds = new ArrayList<>(5); + + for(int i = 0; i < 5; i++) { + 
nodeIds.add(randomValueOtherThanMany(ALLOWED_METRICS::contains, () -> randomAlphaOfLength(23))); + } + + params.put("nodeId", String.join(",", nodeIds)); + params.put("metrics", "_all"); + RestRequest restRequest = buildRestRequest(params); + + NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); + assertArrayEquals(nodeIds.toArray(), actual.nodesIds()); + assertAllMetricsTrue(actual); + } + + private FakeRestRequest buildRestRequest(Map params) { + return new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/_nodes") + .withParams(params) + .build(); + } + + private void assertMetrics(List metrics, NodesInfoRequest nodesInfoRequest) { + assertTrue((metrics.contains("http") && nodesInfoRequest.http()) || metrics.contains("http") == false); + assertTrue((metrics.contains("ingest") && nodesInfoRequest.ingest()) || metrics.contains("ingest") == false); + assertTrue((metrics.contains("indices") && nodesInfoRequest.indices()) || metrics.contains("indices") == false); + assertTrue((metrics.contains("jvm") && nodesInfoRequest.jvm()) || metrics.contains("jvm") == false); + assertTrue((metrics.contains("os") && nodesInfoRequest.os()) || metrics.contains("os") == false); + assertTrue((metrics.contains("plugins") && nodesInfoRequest.plugins()) || metrics.contains("plugins") == false); + assertTrue((metrics.contains("process") && nodesInfoRequest.process()) || metrics.contains("process") == false); + assertTrue((metrics.contains("settings") && nodesInfoRequest.settings()) || metrics.contains("settings") == false); + assertTrue((metrics.contains("thread_pool") && nodesInfoRequest.threadPool()) || metrics.contains("thread_pool") == false); + assertTrue((metrics.contains("transport") && nodesInfoRequest.transport()) || metrics.contains("transport") == false); + } + + private void assertAllMetricsTrue(NodesInfoRequest nodesInfoRequest) { + assertTrue(nodesInfoRequest.http()); + assertTrue(nodesInfoRequest.ingest()); + assertTrue(nodesInfoRequest.indices()); + assertTrue(nodesInfoRequest.jvm()); + assertTrue(nodesInfoRequest.os()); + assertTrue(nodesInfoRequest.plugins()); + assertTrue(nodesInfoRequest.process()); + assertTrue(nodesInfoRequest.settings()); + assertTrue(nodesInfoRequest.threadPool()); + assertTrue(nodesInfoRequest.transport()); + } +} From 5cbce3124a293ccb2556ca7b9133454acf90aa63 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 10 May 2019 11:00:17 -0700 Subject: [PATCH 081/321] Cleanup plugin bin directories (#41907) This commit adds deletion of the bin directory to postrm cleanup. While the package's bin files are cleaned up by the package manager, plugins may have created subdirectories under bin. We already cleanup plugins, but not the extra bin dirs their installation created. 
closes #18109 --- distribution/packages/src/common/scripts/postrm | 8 +++++++- .../org/elasticsearch/packaging/test/PackageTestCase.java | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/distribution/packages/src/common/scripts/postrm b/distribution/packages/src/common/scripts/postrm index a3cb5e1208fe7..c54df43450af4 100644 --- a/distribution/packages/src/common/scripts/postrm +++ b/distribution/packages/src/common/scripts/postrm @@ -8,7 +8,6 @@ # On RedHat, # $1=0 : indicates a removal # $1=1 : indicates an upgrade - REMOVE_DIRS=false REMOVE_USER_AND_GROUP=false @@ -55,6 +54,13 @@ if [ "$REMOVE_DIRS" = "true" ]; then echo " OK" fi + # plugins may have contained bin files + if [ -d /usr/share/elasticsearch/bin ]; then + echo -n "Deleting plugin bin directories..." + rm -rf /usr/share/elasticsearch/bin + echo " OK" + fi + if [ -d /var/run/elasticsearch ]; then echo -n "Deleting PID directory..." rm -rf /var/run/elasticsearch diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index 1b43ebeb00a97..245234baf2012 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -169,6 +169,9 @@ public void test40StartServer() throws Exception { public void test50Remove() throws Exception { assumeThat(installation, is(notNullValue())); + // add fake bin directory as if a plugin was installed + Files.createDirectories(installation.bin.resolve("myplugin")); + remove(distribution()); // removing must stop the service From 7a88c4ae9113f95be7d31d9fd661bd87fc878a87 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 10 May 2019 11:07:29 -0700 Subject: [PATCH 082/321] Fix debian-8 update (#42056) On debian-8, when trying to apt-get update, it currently (sometimes) fails on one of the extra repositories. This failure to update causes keys to not be updated, which later can cause some packages to not install due to lack of key verification. This commit removes the troublesome repository before we attemp to update. closes #42017 --- Vagrantfile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 3bc29005f9b01..0e94a849350e3 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -67,7 +67,10 @@ Vagrant.configure(2) do |config| 'debian-8'.tap do |box| config.vm.define box, define_opts do |config| config.vm.box = 'elastic/debian-8-x86_64' - deb_common config, box + deb_common config, box, extra: <<-SHELL + # this sometimes gets a bad ip, and doesn't appear to be needed + rm /etc/apt/sources.list.d/http_debian_net_debian.list + SHELL end end 'debian-9'.tap do |box| @@ -159,8 +162,8 @@ def deb_common(config, name, extra: '') s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile" end extra_with_lintian = <<-SHELL - install lintian #{extra} + install lintian SHELL linux_common( config, From f97606e1b86a580bbb3e12208a173742a629af3e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 10 May 2019 11:16:57 -0700 Subject: [PATCH 083/321] Don't create tempdir for cli scripts (#41913) The elasticsearch-cli helper script does not use the tempdir created by elasticsearch-env, yet the env script still creates it. This can lead to lots of temp directories being created when running cli scripts in an automated fashion. This commit passes a fake tmpdir to the env script to avoid creation. 
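To make the cost concrete: a wrapper script that unconditionally creates a fresh temp directory leaves one behind on every invocation unless something cleans it up. Below is a small, self-contained JDK sketch of that accumulation; the directory prefix is hypothetical and this is not the launcher code itself.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class TempDirAccumulationDemo {
    public static void main(String[] args) throws IOException {
        // Each run of a CLI wrapper that always creates a tmpdir leaves a
        // directory behind; automated, repeated invocations multiply them
        for (int i = 0; i < 3; i++) {
            Path dir = Files.createTempDirectory("elasticsearch-");
            System.out.println("created " + dir);  // three distinct directories remain
        }
    }
}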
closes #34445 --- distribution/src/bin/elasticsearch | 4 ++++ distribution/src/bin/elasticsearch-env | 4 ---- distribution/src/bin/elasticsearch-env.bat | 3 --- distribution/src/bin/elasticsearch.bat | 4 ++++ 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 6843607efa19b..b7ed2b648b76f 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -16,6 +16,10 @@ source "`dirname "$0"`"/elasticsearch-env +if [ -z "$ES_TMPDIR" ]; then + ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory` +fi + ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"` ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR}" diff --git a/distribution/src/bin/elasticsearch-env b/distribution/src/bin/elasticsearch-env index 2a490622b34b4..78cb503ecef7c 100644 --- a/distribution/src/bin/elasticsearch-env +++ b/distribution/src/bin/elasticsearch-env @@ -84,8 +84,4 @@ ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} ES_DISTRIBUTION_TYPE=${es.distribution.type} ES_BUNDLED_JDK=${es.bundled_jdk} -if [ -z "$ES_TMPDIR" ]; then - ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory` -fi - cd "$ES_HOME" diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index f1cdc2fd22457..8ac141986a4a7 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -64,6 +64,3 @@ if defined JAVA_OPTS ( rem check the Java version %JAVA% -cp "%ES_CLASSPATH%" "org.elasticsearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 -if not defined ES_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a -) diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index f14185ddc4a27..8ef77ac4c7fe9 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -41,6 +41,10 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) +if not defined ES_TMPDIR ( + for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a +) + set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options @setlocal for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a From afba8870bdfb6d5bc4be712994a1347800f0a2ab Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 10 May 2019 14:27:16 -0400 Subject: [PATCH 084/321] Remove global checkpoint assertion in peer recovery (#41987) If remote recovery copies an index commit which has gaps in sequence numbers to a follower; then these assertions (introduced in #40823) don't hold for follower replicas. 
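The removed assertions encode the invariant globalCheckpoint >= maxSeqNo, which presumes the copied commit's history is gap-free. Here is a self-contained numeric sketch of why a follower whose commit has sequence-number gaps violates that invariant; the values are hypothetical and not taken from the change.

import java.util.stream.LongStream;

public class CheckpointGapDemo {
    public static void main(String[] args) {
        // Sequence numbers present in a commit copied to a follower; 3 and 4
        // never arrived, so the history is not contiguous
        long[] seqNos = {0, 1, 2, 5};
        long maxSeqNo = LongStream.of(seqNos).max().getAsLong();  // 5

        // The global checkpoint only advances through contiguous history,
        // so it is stuck just below the gap
        long globalCheckpoint = 2;

        // The deleted assertion required this to be true during recovery
        System.out.println(globalCheckpoint >= maxSeqNo);         // false
    }
}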
Closes #41037 --- .../elasticsearch/indices/recovery/RecoveryTarget.java | 8 -------- 1 file changed, 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index ce1eb3ac85589..1f2c9a0f578cc 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -26,7 +26,6 @@ import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; @@ -39,7 +38,6 @@ import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeases; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotRecoveringException; import org.elasticsearch.index.shard.IndexShardState; @@ -288,9 +286,6 @@ public void prepareForTranslogOperations(boolean fileBasedRecovery, int totalTra ActionListener.completeWith(listener, () -> { state().getTranslog().totalOperations(totalTranslogOps); indexShard().openEngineAndSkipTranslogRecovery(); - assert indexShard.getGlobalCheckpoint() >= indexShard.seqNoStats().getMaxSeqNo() || - indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0) - : "global checkpoint is not initialized [" + indexShard.seqNoStats() + "]"; return null; }); } @@ -396,9 +391,6 @@ public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.Metada store.incRef(); try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData); - assert globalCheckpoint >= Long.parseLong(sourceMetaData.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)) - || indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0) : - "invalid global checkpoint[" + globalCheckpoint + "] source_meta_data [" + sourceMetaData.getCommitUserData() + "]"; final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); From 0192fe7d7cb58f9f5ae0ffcec26fcf9f0890916e Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Fri, 10 May 2019 15:27:41 -0400 Subject: [PATCH 085/321] Add documentation for calendar/fixed intervals (#41919) Original PR missed documentation for the new calendar/fixed intervals. This adds the missing documentation --- .../bucket/datehistogram-aggregation.asciidoc | 284 ++++++++++++------ 1 file changed, 197 insertions(+), 87 deletions(-) diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 2ee40b24a8548..2ee9025b6ded8 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -10,122 +10,194 @@ that here the interval can be specified using date/time expressions. Time-based data requires special support because time-based intervals are not always a fixed length. 
-==== Setting intervals - -There seems to be no limit to the creativity we humans apply to setting our -clocks and calendars. We've invented leap years and leap seconds, standard and -daylight savings times, and timezone offsets of 30 or 45 minutes rather than a -full hour. While these creations help keep us in sync with the cosmos and our -environment, they can make specifying time intervals accurately a real challenge. -The only universal truth our researchers have yet to disprove is that a -millisecond is always the same duration, and a second is always 1000 milliseconds. -Beyond that, things get complicated. - -Generally speaking, when you specify a single time unit, such as 1 hour or 1 day, you -are working with a _calendar interval_, but multiples, such as 6 hours or 3 days, are -_fixed-length intervals_. - -For example, a specification of 1 day (1d) from now is a calendar interval that -means "at -this exact time tomorrow" no matter the length of the day. A change to or from -daylight savings time that results in a 23 or 25 hour day is compensated for and the -specification of "this exact time tomorrow" is maintained. But if you specify 2 or -more days, each day must be of the same fixed duration (24 hours). In this case, if -the specified interval includes the change to or from daylight savings time, the -interval will end an hour sooner or later than you expect. - -There are similar differences to consider when you specify single versus multiple -minutes or hours. Multiple time periods longer than a day are not supported. - -Here are the valid time specifications and their meanings: +==== Calendar and Fixed intervals -milliseconds (ms) :: -Fixed length interval; supports multiples. +When configuring a date histogram aggregation, the interval can be specified +in two manners: calendar-aware time intervals, and fixed time intervals. -seconds (s) :: -1000 milliseconds; fixed length interval (except for the last second of a -minute that contains a leap-second, which is 2000ms long); supports multiples. +Calendar-aware intervals understand that daylight savings changes the length +of specific days, months have different amounts of days, and leap seconds can +be tacked onto a particular year. -minutes (m) :: +Fixed intervals are, by contrast, always multiples of SI units and do not change +based on calendaring context. + +[NOTE] +.Combined `interval` field is deprecated +================================== +deprecated[7.2, `interval` field is deprecated] Historically both calendar and fixed +intervals were configured in a single `interval` field, which led to confusing +semantics. Specifying `1d` would be assumed as a calendar-aware time, +whereas `2d` would be interpreted as fixed time. To get "one day" of fixed time, +the user would need to specify the next smaller unit (in this case, `24h`). + +This combined behavior was often unknown to users, and even when knowledgeable about +the behavior it was difficult to use and confusing. + +This behavior has been deprecated in favor of two new, explicit fields: `calendar_interval` +and `fixed_interval`. + +By forcing a choice between calendar and intervals up front, the semantics of the interval +are clear to the user immediately and there is no ambiguity. The old `interval` field +will be removed in the future. +================================== + +===== Calendar Intervals + +Calendar-aware intervals are configured with the `calendar_interval` parameter. 
+Calendar intervals can only be specified in "singular" quantities of the unit +(`1d`, `1M`, etc). Multiples, such as `2d`, are not supported and will throw an exception. + +The accepted units for calendar intervals are: + +minute (`m`, `1m`) :: All minutes begin at 00 seconds. -* One minute (1m) is the interval between 00 seconds of the first minute and 00 +One minute is the interval between 00 seconds of the first minute and 00 seconds of the following minute in the specified timezone, compensating for any -intervening leap seconds, so that the number of minutes and seconds past the -hour is the same at the start and end. -* Multiple minutes (__n__m) are intervals of exactly 60x1000=60,000 milliseconds -each. +intervening leap seconds, so that the number of minutes and seconds past the +hour is the same at the start and end. -hours (h) :: +hours (`h`, `1h`) :: All hours begin at 00 minutes and 00 seconds. -* One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 +One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 minutes of the following hour in the specified timezone, compensating for any intervening leap seconds, so that the number of minutes and seconds past the hour -is the same at the start and end. -* Multiple hours (__n__h) are intervals of exactly 60x60x1000=3,600,000 milliseconds -each. +is the same at the start and end. -days (d) :: + +days (`d`, `1d`) :: All days begin at the earliest possible time, which is usually 00:00:00 (midnight). -* One day (1d) is the interval between the start of the day and the start of +One day (1d) is the interval between the start of the day and the start of of the following day in the specified timezone, compensating for any intervening time changes. -* Multiple days (__n__d) are intervals of exactly 24x60x60x1000=86,400,000 -milliseconds each. -weeks (w) :: +week (`w`, `1w`) :: -* One week (1w) is the interval between the start day_of_week:hour:minute:second -and the same day of the week and time of the following week in the specified +One week is the interval between the start day_of_week:hour:minute:second +and the same day of the week and time of the following week in the specified timezone. -* Multiple weeks (__n__w) are not supported. -months (M) :: +month (`M`, `1M`) :: -* One month (1M) is the interval between the start day of the month and time of +One month is the interval between the start day of the month and time of day and the same day of the month and time of the following month in the specified timezone, so that the day of the month and time of day are the same at the start and end. -* Multiple months (__n__M) are not supported. -quarters (q) :: +quarter (`q`, `1q`) :: -* One quarter (1q) is the interval between the start day of the month and +One quarter (1q) is the interval between the start day of the month and time of day and the same day of the month and time of day three months later, so that the day of the month and time of day are the same at the start and end. + -* Multiple quarters (__n__q) are not supported. -years (y) :: +year (`y`, `1y`) :: -* One year (1y) is the interval between the start day of the month and time of -day and the same day of the month and time of day the following year in the +One year (1y) is the interval between the start day of the month and time of +day and the same day of the month and time of day the following year in the specified timezone, so that the date and time are the same at the start and end. 
+ -* Multiple years (__n__y) are not supported. -NOTE: -In all cases, when the specified end time does not exist, the actual end time is -the closest available time after the specified end. +===== Calendar Interval Examples +As an example, here is an aggregation requesting bucket intervals of a month in calendar time: -Widely distributed applications must also consider vagaries such as countries that -start and stop daylight savings time at 12:01 A.M., so end up with one minute of -Sunday followed by an additional 59 minutes of Saturday once a year, and countries -that decide to move across the international date line. Situations like -that can make irregular timezone offsets seem easy. +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "date_histogram" : { + "field" : "date", + "calendar_interval" : "month" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] -As always, rigorous testing, especially around time-change events, will ensure -that your time interval specification is -what you intend it to be. +If you attempt to use multiples of calendar units, the aggregation will fail because only +singular calendar units are supported: -WARNING: -To avoid unexpected results, all connected servers and clients must sync to a -reliable network time service. +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "date_histogram" : { + "field" : "date", + "calendar_interval" : "2d" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] +// TEST[catch:bad_request] -==== Examples +[source,js] +-------------------------------------------------- +{ + "error" : { + "root_cause" : [...], + "type" : "x_content_parse_exception", + "reason" : "[1:82] [date_histogram] failed to parse field [calendar_interval]", + "caused_by" : { + "type" : "illegal_argument_exception", + "reason" : "The supplied interval [2d] could not be parsed as a calendar interval.", + "stack_trace" : "java.lang.IllegalArgumentException: The supplied interval [2d] could not be parsed as a calendar interval." + } + } +} + +-------------------------------------------------- +// NOTCONSOLE + +===== Fixed Intervals + +Fixed intervals are configured with the `fixed_interval` parameter. + +In contrast to calendar-aware intervals, fixed intervals are a fixed number of SI +units and never deviate, regardless of where they fall on the calendar. One second +is always composed of 1000ms. This allows fixed intervals to be specified in +any multiple of the supported units. + +However, it means fixed intervals cannot express other units such as months, +since the duration of a month is not a fixed quantity. Attempting to specify +a calendar interval like month or quarter will throw an exception. + +The accepted units for fixed intervals are: + +milliseconds (ms) :: + +seconds (s) :: +Defined as 1000 milliseconds each + +minutes (m) :: +All minutes begin at 00 seconds. -Requesting bucket intervals of a month. +Defined as 60 seconds each (60,000 milliseconds) + +hours (h) :: +All hours begin at 00 minutes and 00 seconds. +Defined as 60 minutes each (3,600,000 milliseconds) + +days (d) :: +All days begin at the earliest possible time, which is usually 00:00:00 +(midnight). 
+ +Defined as 24 hours (86,400,000 milliseconds) + +===== Fixed Interval Examples + +If we try to recreate the "month" `calendar_interval` from earlier, we can approximate that with +30 fixed days: [source,js] -------------------------------------------------- @@ -135,7 +207,7 @@ POST /sales/_search?size=0 "sales_over_time" : { "date_histogram" : { "field" : "date", - "calendar_interval" : "month" + "fixed_interval" : "30d" } } } @@ -144,11 +216,7 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -You can also specify time values using abbreviations supported by -<> parsing. -Note that fractional time values are not supported, but you can address this by -shifting to another -time unit (e.g., `1.5h` could instead be specified as `90m`). +But if we try to use a calendar unit that is not supported, such as weeks, we'll get an exception: [source,js] -------------------------------------------------- @@ -158,7 +226,7 @@ POST /sales/_search?size=0 "sales_over_time" : { "date_histogram" : { "field" : "date", - "fixed_interval" : "90m" + "fixed_interval" : "2w" } } } @@ -166,6 +234,50 @@ POST /sales/_search?size=0 -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[catch:bad_request] + +[source,js] +-------------------------------------------------- +{ + "error" : { + "root_cause" : [...], + "type" : "x_content_parse_exception", + "reason" : "[1:82] [date_histogram] failed to parse field [fixed_interval]", + "caused_by" : { + "type" : "illegal_argument_exception", + "reason" : "failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized", + "stack_trace" : "java.lang.IllegalArgumentException: failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized" + } + } +} + +-------------------------------------------------- +// NOTCONSOLE + +===== Notes + +In all cases, when the specified end time does not exist, the actual end time is +the closest available time after the specified end. + +Widely distributed applications must also consider vagaries such as countries that +start and stop daylight savings time at 12:01 A.M., so end up with one minute of +Sunday followed by an additional 59 minutes of Saturday once a year, and countries +that decide to move across the international date line. Situations like +that can make irregular timezone offsets seem easy. + +As always, rigorous testing, especially around time-change events, will ensure +that your time interval specification is +what you intend it to be. + +WARNING: +To avoid unexpected results, all connected servers and clients must sync to a +reliable network time service. + +NOTE: fractional time values are not supported, but you can address this by +shifting to another time unit (e.g., `1.5h` could instead be specified as `90m`). + +NOTE: You can also specify time values using abbreviations supported by +<> parsing. ===== Keys @@ -522,8 +634,6 @@ control the order using the `order` setting. This setting supports the same `order` functionality as <>. 
-deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys] - ===== Using a script to aggregate by day of the week When you need to aggregate the results by day of the week, use a script that From ac34af563cde1f04fb58336553b062a28eab1832 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 10 May 2019 15:51:12 -0500 Subject: [PATCH 086/321] [ML] adds geo_centroid aggregation support to data frames (#42088) --- .../integration/DataFramePivotRestIT.java | 50 +++++++++++++++++++ .../integration/DataFrameRestTestCase.java | 8 ++- .../pivot/AggregationResultUtils.java | 3 ++ .../transforms/pivot/Aggregations.java | 1 + .../transforms/pivot/AggregationsTests.java | 6 ++- 5 files changed, 66 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 71f0dc248b8b6..770eaec7bd141 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -428,6 +428,56 @@ public void testPivotWithBucketScriptAgg() throws Exception { assertEquals(3.878048780, actual.doubleValue(), 0.000001); } + public void testPivotWithGeoCentroidAgg() throws Exception { + String transformId = "geoCentroidPivot"; + String dataFrameIndex = "geo_centroid_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, + BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + String config = "{" + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + config += " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } }," + + " \"location\": {" + + " \"geo_centroid\": {\"field\": \"location\"}" + + " } } }" + + "}"; + + createDataframeTransformRequest.setJsonEntity(config); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); + assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(dataFrameIndex)); + + // we expect 27 documents as there shall be 27 user_id's + Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + + // get and check some users + Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); + Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); + assertEquals(3.878048780, actual.doubleValue(), 0.000001); + String actualString = (String) ((List) XContentMapValues.extractValue("hits.hits._source.location", searchResult)).get(0); + String[] latlon = 
actualString.split(","); + assertEquals((4 + 10), Double.valueOf(latlon[0]), 0.000001); + assertEquals((4 + 15), Double.valueOf(latlon[1]), 0.000001); + } + private void assertOnePivotValue(String query, double expected) throws IOException { Map searchResult = getAsMap(query); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 4344aa823b4cc..db07e8513cc2d 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -77,6 +77,9 @@ protected void createReviewsIndex() throws IOException { .startObject("stars") .field("type", "integer") .endObject() + .startObject("location") + .field("type", "geo_point") + .endObject() .endObject() .endObject(); } @@ -104,6 +107,7 @@ protected void createReviewsIndex() throws IOException { min = 10 + (i % 49); } int sec = 10 + (i % 49); + String location = (user + 10) + "," + (user + 15); String date_string = "2017-01-" + day + "T" + hour + ":" + min + ":" + sec + "Z"; bulk.append("{\"user_id\":\"") @@ -114,7 +118,9 @@ protected void createReviewsIndex() throws IOException { .append(business) .append("\",\"stars\":") .append(stars) - .append(",\"timestamp\":\"") + .append(",\"location\":\"") + .append(location) + .append("\",\"timestamp\":\"") .append(date_string) .append("\"}\n"); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index 8c4fa96a144ec..f8857591b2322 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.elasticsearch.search.aggregations.metrics.GeoCentroid; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue; import org.elasticsearch.search.aggregations.metrics.ScriptedMetric; @@ -84,6 +85,8 @@ public static Stream> extractCompositeAggregationResults(Com } } else if (aggResult instanceof ScriptedMetric) { updateDocument(document, aggName, ((ScriptedMetric) aggResult).aggregation()); + } else if (aggResult instanceof GeoCentroid) { + updateDocument(document, aggName, ((GeoCentroid) aggResult).centroid().toString()); } else { // Execution should never reach this point! 
// Creating transforms with unsupported aggregations shall not be possible diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java index e7257c463ce7d..615c9b2e8d2e6 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java @@ -35,6 +35,7 @@ enum AggregationType { MAX("max", SOURCE), MIN("min", SOURCE), SUM("sum", SOURCE), + GEO_CENTROID("geo_centroid", "geo_point"), SCRIPTED_METRIC("scripted_metric", DYNAMIC), BUCKET_SCRIPT("bucket_script", DYNAMIC); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java index 5fb8463ae5412..8443699430a2a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java @@ -38,11 +38,15 @@ public void testResolveTargetMapping() { assertEquals("double", Aggregations.resolveTargetMapping("sum", "double")); assertEquals("half_float", Aggregations.resolveTargetMapping("sum", "half_float")); + // geo_centroid + assertEquals("geo_point", Aggregations.resolveTargetMapping("geo_centroid", "geo_point")); + assertEquals("geo_point", Aggregations.resolveTargetMapping("geo_centroid", null)); + // scripted_metric assertEquals("_dynamic", Aggregations.resolveTargetMapping("scripted_metric", null)); assertEquals("_dynamic", Aggregations.resolveTargetMapping("scripted_metric", "int")); - // scripted_metric + // bucket_script assertEquals("_dynamic", Aggregations.resolveTargetMapping("bucket_script", null)); assertEquals("_dynamic", Aggregations.resolveTargetMapping("bucket_script", "int")); } From 808451996480c5785fd953ae67cf2d92daafcc55 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sat, 11 May 2019 21:38:56 -0700 Subject: [PATCH 087/321] Make packaging tests use jdk downloader (#42097) This commit removes the jdk11 download in vagrant provisioning and converts it to using the jdk downloader for the system jdk, and sets up a separate jdk for use by the test (which will be converted to running gradle in a followup). 
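The convertPath helper added at the end of this patch maps a host-side JDK path into the guest's /elasticsearch mount by relativizing it against the repository root. The same idea in a plain-Java, self-contained sketch with hypothetical paths (this is not the plugin code itself):

import java.nio.file.Path;
import java.nio.file.Paths;

public class VmPathDemo {
    public static void main(String[] args) {
        Path repoRoot = Paths.get("/home/user/elasticsearch");     // hypothetical host checkout
        Path jdk = repoRoot.resolve(".gradle/jdks/linux_system");  // hypothetical download dir

        // The repo is mounted at /elasticsearch inside the VM, so a host path
        // translates by swapping the repo root for the mount point
        String vmPath = "/elasticsearch/" + repoRoot.relativize(jdk);
        System.out.println(vmPath);  // /elasticsearch/.gradle/jdks/linux_system
    }
}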
--- Vagrantfile | 20 ++---- .../gradle/vagrant/BatsOverVagrantTask.groovy | 6 +- .../gradle/vagrant/VagrantTestPlugin.groovy | 62 ++++++++++++++++--- 3 files changed, 60 insertions(+), 28 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 0e94a849350e3..c8eba2bca4a6d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -69,7 +69,7 @@ Vagrant.configure(2) do |config| config.vm.box = 'elastic/debian-8-x86_64' deb_common config, box, extra: <<-SHELL # this sometimes gets a bad ip, and doesn't appear to be needed - rm /etc/apt/sources.list.d/http_debian_net_debian.list + rm -f /etc/apt/sources.list.d/http_debian_net_debian.list SHELL end end @@ -256,10 +256,6 @@ def linux_common(config, touch /is_vagrant_vm # for consistency between linux and windows SHELL - config.vm.provision 'jdk-11', type: 'shell', inline: <<-SHELL - curl -sSL https://download.oracle.com/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz | tar xz -C /opt/ - SHELL - # This prevents leftovers from previous tests using the # same VM from messing up the current test config.vm.provision 'clean es installs in tmp', run: 'always', type: 'shell', inline: <<-SHELL @@ -355,11 +351,10 @@ def sh_install_deps(config, return 1 } cat \<\ /etc/profile.d/java_home.sh -if [ -z "\\\$JAVA_HOME" ]; then - export JAVA_HOME=/opt/jdk-11.0.2 +if [ ! -z "\\\$JAVA_HOME" ]; then + export SYSTEM_JAVA_HOME=\\\$JAVA_HOME + unset JAVA_HOME fi -export SYSTEM_JAVA_HOME=\\\$JAVA_HOME -unset JAVA_HOME JAVA ensure tar ensure curl @@ -416,16 +411,9 @@ def windows_common(config, name) $ps_prompt | Out-File $PsHome/Microsoft.PowerShell_profile.ps1 SHELL - config.vm.provision 'windows-jdk-11', type: 'shell', inline: <<-SHELL - New-Item -ItemType Directory -Force -Path "C:/java" - Invoke-WebRequest "https://download.oracle.com/java/GA/jdk11/9/GPL/openjdk-11.0.2_windows-x64_bin.zip" -OutFile "C:/java/jdk-11.zip" - Expand-Archive -Path "C:/java/jdk-11.zip" -DestinationPath "C:/java/" - SHELL - config.vm.provision 'set env variables', type: 'shell', inline: <<-SHELL $ErrorActionPreference = "Stop" [Environment]::SetEnvironmentVariable("PACKAGING_ARCHIVES", "C:/project/build/packaging/archives", "Machine") - [Environment]::SetEnvironmentVariable("SYSTEM_JAVA_HOME", "C:\java\jdk-11.0.2", "Machine") [Environment]::SetEnvironmentVariable("PACKAGING_TESTS", "C:/project/build/packaging/tests", "Machine") [Environment]::SetEnvironmentVariable("JAVA_HOME", $null, "Machine") SHELL diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy index 110f2fc7e8461..af5d328dc0cad 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy @@ -27,15 +27,15 @@ import org.gradle.api.tasks.Input public class BatsOverVagrantTask extends VagrantCommandTask { @Input - String remoteCommand + Object remoteCommand BatsOverVagrantTask() { command = 'ssh' } - void setRemoteCommand(String remoteCommand) { + void setRemoteCommand(Object remoteCommand) { this.remoteCommand = Objects.requireNonNull(remoteCommand) - setArgs(['--command', remoteCommand]) + setArgs((Iterable) ['--command', remoteCommand]) } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 8eb200cd19b45..71c9d53467502 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,11 +1,18 @@ package org.elasticsearch.gradle.vagrant import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.BwcVersions import org.elasticsearch.gradle.FileContentsTask +import org.elasticsearch.gradle.Jdk +import org.elasticsearch.gradle.JdkDownloadPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.BwcVersions -import org.gradle.api.* +import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException +import org.gradle.api.NamedDomainObjectContainer +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.Task import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency @@ -15,6 +22,8 @@ import org.gradle.api.tasks.Exec import org.gradle.api.tasks.StopExecutionException import org.gradle.api.tasks.TaskState +import java.nio.file.Paths + import static java.util.Collections.unmodifiableList class VagrantTestPlugin implements Plugin { @@ -85,8 +94,33 @@ class VagrantTestPlugin implements Plugin { /** extra env vars to pass to vagrant for box configuration **/ Map vagrantBoxEnvVars = [:] + private static final String SYSTEM_JDK_VERSION = "11.0.2+9" + private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691" + private Jdk linuxSystemJdk; + private Jdk linuxGradleJdk; + private Jdk windowsSystemJdk; + private Jdk windowsGradleJdk; + @Override void apply(Project project) { + project.pluginManager.apply(JdkDownloadPlugin.class) + NamedDomainObjectContainer jdksContainer = (NamedDomainObjectContainer) project.getExtensions().getByName("jdks"); + linuxSystemJdk = jdksContainer.create("linux_system") { + version = SYSTEM_JDK_VERSION + platform = "linux" + } + linuxGradleJdk = jdksContainer.create("linux_gradle") { + version = GRADLE_JDK_VERSION + platform = "linux" + } + windowsSystemJdk = jdksContainer.create("windows_system") { + version = SYSTEM_JDK_VERSION + platform = "windows" + } + windowsGradleJdk = jdksContainer.create("windows_gradle") { + version = GRADLE_JDK_VERSION + platform = "windows" + } collectAvailableBoxes(project) @@ -264,7 +298,7 @@ class VagrantTestPlugin implements Plugin { } } - private static void createPrepareVagrantTestEnvTask(Project project) { + private void createPrepareVagrantTestEnvTask(Project project) { File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION) File archivesDir = new File(packagingDir, 'archives') @@ -280,7 +314,7 @@ class VagrantTestPlugin implements Plugin { } Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) { - dependsOn copyPackagingTests + dependsOn copyPackagingTests, linuxGradleJdk, linuxSystemJdk file "${testsDir}/run-tests.sh" contents """\ if [ "\$#" -eq 0 ]; then @@ -288,11 +322,15 @@ class VagrantTestPlugin implements Plugin { else test_args=( "\$@" ) fi - "\$SYSTEM_JAVA_HOME"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" + + if [ -z "\$SYSTEM_JAVA_HOME" ]; then + export SYSTEM_JAVA_HOME="${-> convertPath(project, linuxSystemJdk.toString()) }" + fi + "${-> convertPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" 
org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}" """ } Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { - dependsOn copyPackagingTests + dependsOn copyPackagingTests, windowsGradleJdk, windowsSystemJdk file "${testsDir}/run-tests.ps1" // the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely // a little trappy for those unfamiliar with powershell @@ -302,7 +340,8 @@ class VagrantTestPlugin implements Plugin { } else { \$testArgs = \$args } - & "\$Env:SYSTEM_JAVA_HOME"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs + \$Env:SYSTEM_JAVA_HOME = "${-> convertPath(project, windowsSystemJdk.toString()) }" + & "${-> convertPath(project, windowsGradleJdk.toString()) }"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs exit \$LASTEXITCODE """ } @@ -539,10 +578,10 @@ class VagrantTestPlugin implements Plugin { if (LINUX_BOXES.contains(box)) { Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { - remoteCommand BATS_TEST_COMMAND + remoteCommand "export SYSTEM_JAVA_HOME=\"${-> convertPath(project, linuxSystemJdk.toString())}\"; " + BATS_TEST_COMMAND boxName box environmentVars vagrantEnvVars - dependsOn up, setupPackagingTest + dependsOn up, setupPackagingTest, linuxSystemJdk finalizedBy halt } @@ -617,4 +656,9 @@ class VagrantTestPlugin implements Plugin { } } } + + // convert the given path from an elasticsearch repo path to a VM path + private String convertPath(Project project, String path) { + return "/elasticsearch/" + project.rootDir.toPath().relativize(Paths.get(path)); + } } From 45e1e59371b4c8159d6a11999458537db414a7c2 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 13 May 2019 08:42:26 -0400 Subject: [PATCH 088/321] [DOCS] Rewrite 'rewrite' parameter docs (#42018) --- .../modules/indices/search-settings.asciidoc | 1 + .../query-dsl/multi-term-rewrite.asciidoc | 152 +++++++++++++----- 2 files changed, 109 insertions(+), 44 deletions(-) diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index ad75de1291cdc..30137fa382779 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -3,6 +3,7 @@ The following _expert_ setting can be set to manage global search limits. +[[indices-query-bool-max-clause-count]] `indices.query.bool.max_clause_count`:: Defaults to `1024`. diff --git a/docs/reference/query-dsl/multi-term-rewrite.asciidoc b/docs/reference/query-dsl/multi-term-rewrite.asciidoc index 0d327a40fdea3..391b42ea00791 100644 --- a/docs/reference/query-dsl/multi-term-rewrite.asciidoc +++ b/docs/reference/query-dsl/multi-term-rewrite.asciidoc @@ -1,45 +1,109 @@ [[query-dsl-multi-term-rewrite]] -== Multi Term Query Rewrite - -Multi term queries, like -<> and -<> are called -multi term queries and end up going through a process of rewrite. This -also happens on the -<>. -All of those queries allow to control how they will get rewritten using -the `rewrite` parameter: - -* `constant_score` (default): A rewrite method that performs like -`constant_score_boolean` when there are few matching terms and otherwise -visits all matching terms in sequence and marks documents for that term. -Matching documents are assigned a constant score equal to the query's -boost. 
-* `scoring_boolean`: A rewrite method that first translates each term
-into a should clause in a boolean query, and keeps the scores as
-computed by the query. Note that typically such scores are meaningless
-to the user, and require non-trivial CPU to compute, so it's almost
-always better to use `constant_score`. This rewrite method will hit
-too many clauses failure if it exceeds the boolean query limit (defaults
-to `1024`).
-* `constant_score_boolean`: Similar to `scoring_boolean` except scores
-are not computed. Instead, each matching document receives a constant
-score equal to the query's boost. This rewrite method will hit too many
-clauses failure if it exceeds the boolean query limit (defaults to
-`1024`).
-* `top_terms_N`: A rewrite method that first translates each term into
-should clause in boolean query, and keeps the scores as computed by the
-query. This rewrite method only uses the top scoring terms so it will
-not overflow boolean max clause count. The `N` controls the size of the
-top scoring terms to use.
-* `top_terms_boost_N`: A rewrite method that first translates each term
-into should clause in boolean query, but the scores are only computed as
-the boost. This rewrite method only uses the top scoring terms so it
-will not overflow the boolean max clause count. The `N` controls the
-size of the top scoring terms to use.
-* `top_terms_blended_freqs_N`: A rewrite method that first translates each
-term into should clause in boolean query, but all term queries compute scores
-as if they had the same frequency. In practice the frequency which is used
-is the maximum frequency of all matching terms. This rewrite method only uses
-the top scoring terms so it will not overflow boolean max clause count. The
-`N` controls the size of the top scoring terms to use.
+== `rewrite` Parameter
+
+WARNING: This parameter is for expert users only. Changing the value of
+this parameter can impact search performance and relevance.
+
+{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to power
+indexing and searching. Lucene cannot execute the following queries in their
+original form:
+
+* <<query-dsl-fuzzy-query, `fuzzy` query>>
+* <<query-dsl-prefix-query, `prefix` query>>
+* <<query-dsl-query-string-query, `query_string` query>>
+* <<query-dsl-regexp-query, `regexp` query>>
+* <<query-dsl-wildcard-query, `wildcard` query>>
+
+To execute them, Lucene changes these queries to a simpler form, such as a
+<<query-dsl-bool-query, `bool` query>> or a
+https://en.wikipedia.org/wiki/Bit_array[bit set].
+
+The `rewrite` parameter determines:
+
+* How Lucene calculates the relevance scores for each matching document
+* Whether Lucene changes the original query to a `bool`
+query or a bit set
+* If changed to a `bool` query, which `term` query clauses are included
+
+[float]
+[[rewrite-param-valid-values]]
+=== Valid values
+
+`constant_score` (Default)::
+Uses the `constant_score_boolean` method when there are few matching terms.
+Otherwise, this method finds all matching terms in sequence and returns
+matching documents using a bit set.
+
+`constant_score_boolean`::
+Assigns each document a relevance score equal to the `boost`
+parameter.
++
+This method changes the original query to a <<query-dsl-bool-query, `bool`
+query>>. This `bool` query contains a `should` clause and a
+<<query-dsl-term-query, `term` query>> for each matching term.
++
+This method can cause the final `bool` query to exceed the clause limit in the
+<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
+setting. If the query exceeds this limit, {es} returns an error.
+
+`scoring_boolean`::
+Calculates a relevance score for each matching document.
++
+This method changes the original query to a <<query-dsl-bool-query, `bool`
+query>>. This `bool` query contains a `should` clause and a
+<<query-dsl-term-query, `term` query>> for each matching term.
++
+This method can cause the final `bool` query to exceed the clause limit in the
+<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
+setting. If the query exceeds this limit, {es} returns an error.
+
+`top_terms_blended_freqs_N`::
+Calculates a relevance score for each matching document as if all terms had the
+same frequency. This frequency is the maximum frequency of all matching terms.
++
+This method changes the original query to a <<query-dsl-bool-query, `bool`
+query>>. This `bool` query contains a `should` clause and a
+<<query-dsl-term-query, `term` query>> for each matching term.
++
+The final `bool` query only includes `term` queries for the top `N` scoring
+terms.
++
+You can use this method to avoid exceeding the clause limit in the
+<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
+setting.
+
+`top_terms_boost_N`::
+Assigns each matching document a relevance score equal to the `boost` parameter.
++
+This method changes the original query to a <<query-dsl-bool-query, `bool`
+query>>. This `bool` query contains a `should` clause and a
+<<query-dsl-term-query, `term` query>> for each matching term.
++
+The final `bool` query only includes `term` queries for the top `N` terms.
++
+You can use this method to avoid exceeding the clause limit in the
+<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
+setting.
+
+`top_terms_N`::
+Calculates a relevance score for each matching document.
++
+This method changes the original query to a <<query-dsl-bool-query, `bool`
+query>>. This `bool` query contains a `should` clause and a
+<<query-dsl-term-query, `term` query>> for each matching term.
++
+The final `bool` query
+only includes `term` queries for the top `N` scoring terms.
++
+You can use this method to avoid exceeding the clause limit in the
+<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
+setting.
+
+[float]
+[[rewrite-param-perf-considerations]]
+=== Performance considerations for the `rewrite` parameter
+For most uses, we recommend using the `constant_score`,
+`constant_score_boolean`, or `top_terms_boost_N` rewrite methods.
+
+Other methods calculate relevance scores. These score calculations are often
+expensive and do not improve query results.
\ No newline at end of file
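For illustration, a minimal sketch of how the `rewrite` parameter documented above can be set from Java on a multi-term query builder. This is a hedged example, not part of the patch; the field name "user" and the pattern "ki*y" are invented:

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.index.query.WildcardQueryBuilder;

    class RewriteParameterExample {
        static WildcardQueryBuilder wildcardWithRewrite() {
            // A wildcard query is one of the multi-term queries that Lucene
            // rewrites; the rewrite method is chosen per query via `rewrite`.
            return QueryBuilders.wildcardQuery("user", "ki*y")
                    .rewrite("constant_score"); // the default method, set explicitly
        }
    }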
From 0e48bbbfc9dc74e5fe7af45b868debfd9a5cc1d1 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Mon, 13 May 2019 18:28:24 -0700
Subject: [PATCH 089/321] Remove 6.0 version constant uses (#41965)

This commit removes all uses of the 6.0 version constants, since master
no longer needs to know about them.
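For illustration, the shape of this change at a typical call site, distilled from the Queries.java hunk below (a sketch, not an additional diff; the surrounding class and the `indexSettings` accessor in the comment are stand-ins):

    import org.apache.lucene.search.Query;
    import org.elasticsearch.common.lucene.search.Queries;

    class NonNestedFilterSketch {
        static Query nonNestedFilter() {
            // Before: callers threaded the index-created version through, because
            // indices created before 6.1 lacked the primary-term doc-values field:
            //   Queries.newNonNestedFilter(indexSettings.getIndexVersionCreated());
            // After: with 6.0 no longer supported, every index has that field,
            // so the version parameter is removed.
            return Queries.newNonNestedFilter();
        }
    }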
--- .../client/core/MainResponseTests.java | 2 +- .../percolator/PercolatorFieldMapper.java | 2 +- .../PercolatorMatchedSlotSubFetchPhase.java | 4 +- .../PercolatorFieldMapperTests.java | 20 ---- .../percolator/QueryAnalyzerTests.java | 6 - .../test/search/10_source_filtering.yml | 14 --- .../main/java/org/elasticsearch/Version.java | 10 -- .../common/lucene/search/Queries.java | 13 +-- .../index/cache/bitset/BitsetFilterCache.java | 2 +- .../index/mapper/TypeFieldMapper.java | 2 +- .../index/query/NestedQueryBuilder.java | 4 +- .../index/shard/ShardSplittingQuery.java | 7 +- .../search/DefaultSearchContext.java | 2 +- .../bucket/nested/NestedAggregator.java | 2 +- .../nested/ReverseNestedAggregator.java | 2 +- .../search/fetch/FetchPhase.java | 4 +- .../search/sort/SortBuilder.java | 2 +- .../ClusterUpdateSettingsResponseTests.java | 7 -- .../close/CloseIndexResponseTests.java | 5 +- .../rollover/RolloverResponseTests.java | 8 -- .../action/main/MainResponseTests.java | 2 +- .../cluster/block/ClusterBlockTests.java | 41 ------- .../coordination/JoinTaskExecutorTests.java | 18 +-- .../common/lucene/search/QueriesTests.java | 20 +--- .../common/lucene/uid/VersionsTests.java | 2 +- .../elasticsearch/get/LegacyGetActionIT.java | 88 -------------- .../index/analysis/PreBuiltAnalyzerTests.java | 11 +- .../index/mapper/AllFieldMapperTests.java | 63 ----------- .../mapper/LegacyTypeFieldMapperTests.java | 41 ------- .../index/mapper/NestedObjectMapperTests.java | 56 +-------- .../index/mapper/TypeFieldTypeTests.java | 2 +- .../query/LegacyGeoShapeFieldQueryTests.java | 107 ------------------ .../search/nested/NestedSortingTests.java | 3 +- .../indices/IndicesModuleTests.java | 26 +---- .../indices/stats/LegacyIndexStatsIT.java | 104 ----------------- .../bucket/nested/NestedAggregatorTests.java | 8 +- .../org/elasticsearch/test/VersionUtils.java | 10 ++ .../elasticsearch/test/VersionUtilsTests.java | 4 +- .../section/ClientYamlTestSectionTests.java | 5 +- .../section/ClientYamlTestSuiteTests.java | 2 +- .../rest/yaml/section/SetupSectionTests.java | 2 +- .../rest/yaml/section/SkipSectionTests.java | 2 +- .../yaml/section/TeardownSectionTests.java | 4 +- .../ml/MachineLearningFeatureSetUsage.java | 11 +- .../xpack/core/ml/datafeed/AggProvider.java | 37 +----- .../xpack/core/ml/datafeed/QueryProvider.java | 35 +----- .../authz/permission/DocumentPermissions.java | 4 +- .../core/ml/datafeed/AggProviderTests.java | 69 ----------- .../core/ml/datafeed/DatafeedConfigTests.java | 4 +- .../core/ml/datafeed/DatafeedUpdateTests.java | 4 +- .../core/ml/datafeed/QueryProviderTests.java | 75 ------------ .../token/CreateTokenResponseTests.java | 54 --------- .../IndexDeprecationChecksTests.java | 4 +- .../ml/action/TransportForecastJobAction.java | 2 +- .../ml/MachineLearningFeatureSetTests.java | 18 +-- ...ransportForecastJobActionRequestTests.java | 2 +- .../authc/esnative/ReservedRealm.java | 2 +- .../filter/SecurityActionFilterTests.java | 2 +- .../authc/esnative/ReservedRealmTests.java | 28 ----- .../mapper/ExpressionRoleMappingTests.java | 2 +- .../accesscontrol/IndicesPermissionTests.java | 5 +- .../support/SecurityIndexManagerTests.java | 27 ++--- .../xpack/sql/jdbc/VersionParityTests.java | 2 +- .../WatcherIndexTemplateRegistryTests.java | 4 +- 64 files changed, 106 insertions(+), 1024 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java delete mode 100644 server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java delete mode 
100644 server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java delete mode 100644 server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java index 24925e819a443..4a5cd2056655a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MainResponseTests.java @@ -38,7 +38,7 @@ protected org.elasticsearch.action.main.MainResponse createServerTestInstance() ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); final String date = new Date(randomNonNegativeLong()).toString(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_1, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); Build build = new Build( Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(), version.toString() diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 9ad660b4e548c..9d612c0c2926b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -261,7 +261,7 @@ Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List t = fieldType.createCandidateQuery(indexReader, Version.CURRENT); - assertTrue(t.v2()); - assertEquals(2, t.v1().clauses().size()); - assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(CoveringQuery.class)); - assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class)); - - t = fieldType.createCandidateQuery(indexReader, Version.V_6_0_0); - assertTrue(t.v2()); - assertEquals(2, t.v1().clauses().size()); - assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class)); - assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class)); - } - public void testExtractTermsAndRanges_numberFields() throws Exception { addQueryFieldMappings(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 544cfc6ef6193..c07467187f05f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -333,12 +333,6 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() { assertThat(result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(0)); assertTermsEqual(result.extractions); - - result = analyze(booleanQuery, Version.CURRENT); - assertThat(result.matchAllDocs, is(true)); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(0)); - assertTermsEqual(result.extractions); } public void testExactMatch_booleanQuery() { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index b15a48f52a43e..df6664141c3b3 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -165,20 +165,6 @@ setup: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } ---- -"docvalue_fields with default format": - - skip: - features: warnings - - do: - warnings: - - "[use_field_mapping] is a special format that was only used to ease the transition to 7.x. It has become the default and shouldn't be set explicitly anymore." - search: - body: - docvalue_fields: - - field: "count" - format: "use_field_mapping" - - match: { hits.hits.0.fields.count: [1] } - --- "docvalue_fields with explicit format": diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c2d927f457bd1..467a8b5cce177 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,12 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_0_0_ID = 6000099; - public static final Version V_6_0_0 = - new Version(V_6_0_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); - public static final int V_6_0_1_ID = 6000199; - public static final Version V_6_0_1 = - new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1); public static final int V_6_1_0_ID = 6010099; public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_1_ID = 6010199; @@ -212,10 +206,6 @@ public static Version fromId(int id) { return V_6_1_1; case V_6_1_0_ID: return V_6_1_0; - case V_6_0_1_ID: - return V_6_0_1; - case V_6_0_0_ID: - return V_6_0_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 56d1b5cedc33c..96a0cafc35b11 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; @@ -72,17 +71,9 @@ public static Query newNestedFilter() { /** * Creates a new non-nested docs query - * @param indexVersionCreated the index version created since newer indices can identify a parent field more efficiently */ - public static Query newNonNestedFilter(Version indexVersionCreated) { - if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) { - return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME); - } else { - return new BooleanQuery.Builder() - .add(new MatchAllDocsQuery(), Occur.FILTER) - .add(newNestedFilter(), Occur.MUST_NOT) - .build(); - } + public static Query newNonNestedFilter() { + return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME); } public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) { diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java 
b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 091de68c514d0..b1812c40e03eb 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -251,7 +251,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin } if (hasNested) { - warmUp.add(Queries.newNonNestedFilter(indexSettings.getIndexVersionCreated())); + warmUp.add(Queries.newNonNestedFilter()); } final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index c4d9ef966ca3d..cb17f182ef77a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -140,7 +140,7 @@ public Query termsQuery(List values, QueryShardContext context) { .anyMatch(indexType::equals)) { if (context.getMapperService().hasNested()) { // type filters are expected not to match nested docs - return Queries.newNonNestedFilter(context.indexVersionCreated()); + return Queries.newNonNestedFilter(); } else { return new MatchAllDocsQuery(); } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index ee8062308ac11..fecf5c8407e98 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -281,7 +281,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { Query innerQuery; ObjectMapper objectMapper = context.nestedScope().getObjectMapper(); if (objectMapper == null) { - parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())); + parentFilter = context.bitsetFilter(Queries.newNonNestedFilter()); } else { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } @@ -388,7 +388,7 @@ public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { SearchHit hit = hits[i]; Query rawParentFilter; if (parentObjectMapper == null) { - rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()); + rawParentFilter = Queries.newNonNestedFilter(); } else { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index dae910e5fe3c4..4082293d9f2eb 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -41,7 +41,6 @@ import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.common.lucene.search.Queries; @@ -67,7 +66,7 @@ final class ShardSplittingQuery extends Query { ShardSplittingQuery(IndexMetaData indexMetaData, int shardId, boolean hasNested) { this.indexMetaData = indexMetaData; this.shardId = shardId; - 
this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null; + this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer() : null; } @Override public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) { @@ -339,9 +338,9 @@ public float matchCost() { * than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is * executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either. */ - private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) { + private static BitSetProducer newParentDocBitSetProducer() { return context -> { - Query query = Queries.newNonNestedFilter(indexVersionCreated); + Query query = Queries.newNonNestedFilter(); final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index d9a7fdb831efd..f0eaa0d51dadf 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -278,7 +278,7 @@ public Query buildFilteredQuery(Query query) { && typeFilter == null // when a _type filter is set, it will automatically exclude nested docs && new NestedHelper(mapperService()).mightMatchNestedDocs(query) && (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) { - filters.add(Queries.newNonNestedFilter(mapperService().getIndexSettings().getIndexVersionCreated())); + filters.add(Queries.newNonNestedFilter()); } if (aliasFilter != null) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 3e3c07bc42398..68e46b37bb064 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -66,7 +66,7 @@ public class NestedAggregator extends BucketsAggregator implements SingleBucketA super(name, factories, context, parentAggregator, pipelineAggregators, metaData); Query parentFilter = parentObjectMapper != null ? 
parentObjectMapper.nestedTypeFilter() - : Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated()); + : Queries.newNonNestedFilter(); this.parentFilter = context.bitsetFilterCache().getBitSetProducer(parentFilter); this.childFilter = childObjectMapper.nestedTypeFilter(); this.collectsFromSingleBucket = collectsFromSingleBucket; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 415ae39c71e00..2f29f8f2cdcfc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -54,7 +54,7 @@ public ReverseNestedAggregator(String name, AggregatorFactories factories, Objec throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); if (objectMapper == null) { - parentFilter = Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated()); + parentFilter = Queries.newNonNestedFilter(); } else { parentFilter = objectMapper.nestedTypeFilter(); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index ab2f864bfce35..c23be0f4cb994 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -190,7 +190,7 @@ public void execute(SearchContext context) { private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException { if (context.mapperService().hasNested()) { BitSet bits = context.bitsetFilterCache() - .getBitSetProducer(Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated())) + .getBitSetProducer(Queries.newNonNestedFilter()) .getBitSet(subReaderContext); if (!bits.get(subDocId)) { return bits.nextSetBit(subDocId); @@ -363,7 +363,7 @@ private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context } parentFilter = nestedParentObjectMapper.nestedTypeFilter(); } else { - parentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()); + parentFilter = Queries.newNonNestedFilter(); } Query childFilter = nestedObjectMapper.nestedTypeFilter(); diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index b4358abee0728..4d793de18443d 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -185,7 +185,7 @@ protected static Nested resolveNested(QueryShardContext context, NestedSortBuild final ObjectMapper objectMapper = context.nestedScope().getObjectMapper(); final Query parentQuery; if (objectMapper == null) { - parentQuery = Queries.newNonNestedFilter(context.indexVersionCreated()); + parentQuery = Queries.newNonNestedFilter(); } else { parentQuery = objectMapper.nestedTypeFilter(); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java index 
e8bd14b640dfa..485b2dc3fb4d1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java @@ -19,16 +19,13 @@ package org.elasticsearch.action.admin.cluster.settings; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; -import org.elasticsearch.test.VersionUtils; -import java.io.IOException; import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -100,8 +97,4 @@ protected ClusterUpdateSettingsResponse createBlankInstance() { return new ClusterUpdateSettingsResponse(); } - public void testOldSerialisation() throws IOException { - ClusterUpdateSettingsResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); - } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java index cca95e09151ef..f86beff7738e3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; @@ -48,7 +47,7 @@ public void testBwcSerialization() throws Exception { { final CloseIndexResponse response = randomResponse(); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0)); response.writeTo(out); final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse(); @@ -65,7 +64,7 @@ public void testBwcSerialization() throws Exception { final CloseIndexResponse deserializedResponse = new CloseIndexResponse(); try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0)); deserializedResponse.readFrom(in); } assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java index 0cc3f455e83df..a38de844626dc 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -20,14 +20,11 @@ package 
org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; -import org.elasticsearch.test.VersionUtils; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -131,9 +128,4 @@ protected RolloverResponse mutateInstance(RolloverResponse response) { throw new UnsupportedOperationException(); } } - - public void testOldSerialisation() throws IOException { - RolloverResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); - } } diff --git a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index 1dff130fb98a6..2ee9cb9e1397e 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -41,7 +41,7 @@ protected MainResponse createTestInstance() { ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); final String date = new Date(randomNonNegativeLong()).toString(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_1, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); Build build = new Build( Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(), version.toString() diff --git a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index 4bd6c15853aa0..51a34d94b3a05 100644 --- a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -32,9 +32,7 @@ import java.util.List; import static java.util.EnumSet.copyOf; -import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.endsWith; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; @@ -62,45 +60,6 @@ public void testSerialization() throws Exception { } } - public void testBwcSerialization() throws Exception { - for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { - // Generate a random cluster block in version < 7.0.0 - final Version version = randomVersionBetween(random(), Version.V_6_0_0, getPreviousVersion(Version.V_6_7_0)); - final ClusterBlock expected = randomClusterBlock(version); - assertNull(expected.uuid()); - - // Serialize to node in current version - final BytesStreamOutput out = new BytesStreamOutput(); - expected.writeTo(out); - - // Deserialize and check the cluster block - final ClusterBlock actual = new ClusterBlock(out.bytes().streamInput()); - assertClusterBlockEquals(expected, actual); - } - - for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { - // Generate a random cluster block in current version - final ClusterBlock expected = randomClusterBlock(Version.CURRENT); - - // Serialize to node in version < 7.0.0 - final BytesStreamOutput 
out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, getPreviousVersion(Version.V_6_7_0))); - expected.writeTo(out); - - // Deserialize and check the cluster block - final StreamInput in = out.bytes().streamInput(); - in.setVersion(out.getVersion()); - final ClusterBlock actual = new ClusterBlock(in); - - assertThat(actual.id(), equalTo(expected.id())); - assertThat(actual.status(), equalTo(expected.status())); - assertThat(actual.description(), equalTo(expected.description())); - assertThat(actual.retryable(), equalTo(expected.retryable())); - assertThat(actual.disableStatePersistence(), equalTo(expected.disableStatePersistence())); - assertArrayEquals(actual.levels().toArray(), expected.levels().toArray()); - } - } - public void testToStringDanglingComma() { final ClusterBlock clusterBlock = randomClusterBlock(); assertThat(clusterBlock.toString(), not(endsWith(","))); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index 35fa5786bbda3..e20559ca00561 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.Version; -import org.elasticsearch.cluster.coordination.JoinTaskExecutor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,7 +29,6 @@ import org.elasticsearch.test.VersionUtils; import static org.elasticsearch.test.VersionUtils.getPreviousVersion; -import static org.elasticsearch.test.VersionUtils.incompatibleFutureVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; @@ -89,21 +87,9 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { }); } - if (minNodeVersion.before(Version.V_6_0_0)) { - Version tooHigh = incompatibleFutureVersion(minNodeVersion); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooHigh, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooHigh, minNodeVersion, maxNodeVersion); - } - }); - } + Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); + expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); - if (minNodeVersion.onOrAfter(Version.V_7_0_0)) { - Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); - expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); - } final Version minGoodVersion = maxNodeVersion.major == minNodeVersion.major ? 
// we have to stick with the same major diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java index a1236fd53df92..bb759848dc4b7 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java @@ -20,33 +20,21 @@ package org.elasticsearch.common.lucene.search; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; public class QueriesTests extends ESTestCase { public void testNonNestedQuery() { - for (Version version : VersionUtils.allVersions()) { - // This is a custom query that extends AutomatonQuery and want to make sure the equals method works - assertEquals(Queries.newNonNestedFilter(version), Queries.newNonNestedFilter(version)); - assertEquals(Queries.newNonNestedFilter(version).hashCode(), Queries.newNonNestedFilter(version).hashCode()); - if (version.onOrAfter(Version.V_6_1_0)) { - assertEquals(Queries.newNonNestedFilter(version), new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME)); - } else { - assertEquals(Queries.newNonNestedFilter(version), new BooleanQuery.Builder() - .add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER) - .add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT) - .build()); - } - } + // This is a custom query that extends AutomatonQuery and want to make sure the equals method works + assertEquals(Queries.newNonNestedFilter(), Queries.newNonNestedFilter()); + assertEquals(Queries.newNonNestedFilter().hashCode(), Queries.newNonNestedFilter().hashCode()); + assertEquals(Queries.newNonNestedFilter(), new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME)); } public void testIsNegativeQuery() { diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index b18daf07bf361..febe2b976fb47 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -202,7 +202,7 @@ public void testLuceneVersionOnUnknownVersions() { // too old version, major should be the oldest supported lucene version minus 1 version = Version.fromString("5.2.1"); - assertEquals(Version.V_6_0_0.luceneVersion.major - 1, version.luceneVersion.major); + assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major); // future version, should be the same version as today version = Version.fromString("8.77.1"); diff --git a/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java b/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java deleted file mode 100644 index 4382f677ad63e..0000000000000 --- a/server/src/test/java/org/elasticsearch/get/LegacyGetActionIT.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.get; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.get.GetActionIT.indexOrAlias; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -public class LegacyGetActionIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testGetFieldsMetaDataWithRouting() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("_doc", "field1", "type=keyword,store=true") - .addAlias(new Alias("alias")) - .setSettings( - Settings.builder() - .put("index.refresh_interval", -1) - .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0))); // multi-types in 6.0.0 - - try (XContentBuilder source = jsonBuilder().startObject().field("field1", "value").endObject()) { - client() - .prepareIndex("test", "_doc", "1") - .setRouting("1") - .setSource(source) - .get(); - } - - { - final GetResponse getResponse = client() - .prepareGet(indexOrAlias(), "_doc", "1") - .setRouting("1") - .setStoredFields("field1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - } - - flush(); - - { - final GetResponse getResponse = client() - .prepareGet(indexOrAlias(), "_doc", "1") - .setStoredFields("field1") - .setRouting("1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index cdc38cd3abd0d..65958ec9319c2 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collection; @@ -61,17 +62,17 @@ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_6_0_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT.minimumIndexCompatibilityVersion()))); } public void testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT)); // same es version should be cached - assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_2_1)); - assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_0), - PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_6_0_1)); + Version v = VersionUtils.randomVersion(random()); + assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(v), PreBuiltAnalyzers.STANDARD.getAnalyzer(v)); + assertNotSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT), + PreBuiltAnalyzers.STANDARD.getAnalyzer(VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT))); // Same Lucene version should be cached: assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java index 34200b51cb317..1a6e6e2c90aed 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -28,9 +26,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.VersionUtils; - -import static org.hamcrest.CoreMatchers.containsString; public class AllFieldMapperTests extends ESSingleNodeTestCase { @@ -39,64 +34,6 @@ protected boolean forbidPrivateIndexSettings() { return false; } - public void testAllDisabled() throws Exception { - { - final Version version = VersionUtils.randomVersionBetween(random(), - Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); - IndexService indexService = createIndex("test_6x", - Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .build() - ); - String mappingDisabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", false) - .endObject().endObject() - ); - indexService.mapperService().merge("_doc", new CompressedXContent(mappingDisabled), MergeReason.MAPPING_UPDATE); - - String mappingEnabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", true) - 
.endObject().endObject() - ); - MapperParsingException exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingEnabled), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("[_all] is disabled in this version.")); - } - { - IndexService indexService = createIndex("test"); - String mappingEnabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", true) - .endObject().endObject() - ); - MapperParsingException exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingEnabled), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); - - String mappingDisabled = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all") - .field("enabled", false) - .endObject().endObject() - ); - exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingDisabled), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); - - String mappingAll = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("_all").endObject().endObject() - ); - exc = expectThrows(MapperParsingException.class, - () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingAll), MergeReason.MAPPING_UPDATE)); - assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); - - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().endObject()); - indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); - assertEquals("{\"_doc\":{}}", indexService.mapperService().documentMapper("_doc").mapping().toString()); - } - } - public void testUpdateDefaultSearchAnalyzer() throws Exception { IndexService indexService = createIndex("test", Settings.builder() .put("index.analysis.analyzer.default_search.type", "custom") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java deleted file mode 100644 index 9566e1afa6df0..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyTypeFieldMapperTests.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESSingleNodeTestCase; - -public class LegacyTypeFieldMapperTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testDocValuesMultipleTypes() throws Exception { - TypeFieldMapperTests.testDocValues(index -> { - final Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0).build(); - return this.createIndex(index, settings); - }); - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index d3f41589fb1fd..edca517830833 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -19,10 +19,7 @@ package org.elasticsearch.index.mapper; -import java.util.HashSet; import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -35,12 +32,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -698,55 +695,4 @@ protected boolean forbidPrivateIndexSettings() { */ return false; } - - public void testReorderParentBWC() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("nested1").field("type", "nested").endObject() - .endObject().endObject().endObject()); - - Version bwcVersion = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0); - for (Version version : new Version[] {Version.V_6_5_0, bwcVersion}) { - DocumentMapper docMapper = createIndex("test-" + version, - Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), version).build()) - .mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - - assertThat(docMapper.hasNestedObjects(), equalTo(true)); - ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); - assertThat(nested1Mapper.nested().isNested(), equalTo(true)); - - ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", - BytesReference.bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .startArray("nested1") - .startObject() - .field("field1", "1") - .field("field2", "2") - .endObject() - .startObject() - .field("field1", "3") - .field("field2", "4") - .endObject() - .endArray() - .endObject()), - XContentType.JSON)); - - assertThat(doc.docs().size(), equalTo(3)); - if (version.onOrAfter(Version.V_6_5_0)) { - assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), 
equalTo(nested1Mapper.nestedTypePathAsString())); - assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1")); - assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2")); - assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3")); - assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4")); - assertThat(doc.docs().get(2).get("field"), equalTo("value")); - } else { - assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); - assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3")); - assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4")); - assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1")); - assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2")); - assertThat(doc.docs().get(2).get("field"), equalTo("value")); - } - } - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index 4e6f504e99263..79a93c04faa1f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -67,7 +67,7 @@ public void testTermsQuery() throws Exception { Mockito.when(mapperService.hasNested()).thenReturn(true); query = ft.termQuery("my_type", context); - assertEquals(Queries.newNonNestedFilter(context.indexVersionCreated()), query); + assertEquals(Queries.newNonNestedFilter(), query); mapper = Mockito.mock(DocumentMapper.class); Mockito.when(mapper.type()).thenReturn("other_type"); diff --git a/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java b/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java deleted file mode 100644 index dc2a4a0e3fffe..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/query/LegacyGeoShapeFieldQueryTests.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.common.geo.builders.ShapeBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.geo.RandomShapeGenerator; -import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; - -import java.io.IOException; - -public class LegacyGeoShapeFieldQueryTests extends GeoShapeQueryBuilderTests { - - @Override - protected String fieldName() { - return GEO_SHAPE_FIELD_NAME; - } - - @Override - protected Settings createTestIndexSettings() { - // force the legacy shape impl - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_5_0); - return Settings.builder() - .put(super.createTestIndexSettings()) - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .build(); - } - - @Override - protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { - ShapeType shapeType = ShapeType.randomType(random()); - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType); - GeoShapeQueryBuilder builder; - clearShapeFields(); - if (indexedShape == false) { - builder = new GeoShapeQueryBuilder(fieldName(), shape); - } else { - indexedShapeToReturn = shape; - indexedShapeId = randomAlphaOfLengthBetween(3, 20); - builder = new GeoShapeQueryBuilder(fieldName(), indexedShapeId); - if (randomBoolean()) { - indexedShapeIndex = randomAlphaOfLengthBetween(3, 20); - builder.indexedShapeIndex(indexedShapeIndex); - } - if (randomBoolean()) { - indexedShapePath = randomAlphaOfLengthBetween(3, 20); - builder.indexedShapePath(indexedShapePath); - } - if (randomBoolean()) { - indexedShapeRouting = randomAlphaOfLengthBetween(3, 20); - builder.indexedShapeRouting(indexedShapeRouting); - } - } - if (randomBoolean()) { - SpatialStrategy strategy = randomFrom(SpatialStrategy.values()); - // ShapeType.MULTILINESTRING + SpatialStrategy.TERM can lead to large queries and will slow down tests, so - // we try to avoid that combination - while (shapeType == ShapeType.MULTILINESTRING && strategy == SpatialStrategy.TERM) { - strategy = randomFrom(SpatialStrategy.values()); - } - builder.strategy(strategy); - if (strategy != SpatialStrategy.TERM) { - builder.relation(randomFrom(ShapeRelation.values())); - } - } - - if (randomBoolean()) { - builder.ignoreUnmapped(randomBoolean()); - } - return builder; - } - - public void testInvalidRelation() throws IOException { - ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null); - GeoShapeQueryBuilder builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - builder.strategy(SpatialStrategy.TERM); - expectThrows(IllegalArgumentException.class, () -> builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); - GeoShapeQueryBuilder builder2 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - builder2.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN)); - expectThrows(IllegalArgumentException.class, () -> builder2.strategy(SpatialStrategy.TERM)); - GeoShapeQueryBuilder builder3 = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - builder3.strategy(SpatialStrategy.TERM); - expectThrows(IllegalArgumentException.class, () -> builder3.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.WITHIN))); - 
} -} diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 0dccf5937dca7..dd44a386329f9 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -44,7 +44,6 @@ import org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; -import org.elasticsearch.Version; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; @@ -799,7 +798,7 @@ private static TopFieldDocs search(QueryBuilder queryBuilder, FieldSortBuilder s IndexSearcher searcher) throws IOException { Query query = new BooleanQuery.Builder() .add(queryBuilder.toQuery(queryShardContext), Occur.MUST) - .add(Queries.newNonNestedFilter(Version.CURRENT), Occur.FILTER) + .add(Queries.newNonNestedFilter(), Occur.FILTER) .build(); Sort sort = new Sort(sortBuilder.build(queryShardContext).field); return searcher.search(query, 10, sort); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index f31ac0627138e..6254449df05a6 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -98,19 +98,8 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); - assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); - Map metadataMapperParsers = - module.getMapperRegistry().getMetadataMapperParsers(version); - assertEquals(EXPECTED_METADATA_FIELDS_6x.length, metadataMapperParsers.size()); - int i = 0; - for (String field : metadataMapperParsers.keySet()) { - assertEquals(EXPECTED_METADATA_FIELDS_6x[i++], field); - } - } - { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); Map metadataMapperParsers = @@ -127,15 +116,12 @@ public void testBuiltinWithPlugins() { IndicesModule noPluginsModule = new IndicesModule(Collections.emptyList()); IndicesModule module = new IndicesModule(fakePlugins); MapperRegistry registry = module.getMapperRegistry(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); - assertThat(registry.getMetadataMapperParsers(version).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(version).size())); - Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(version); + 
assertThat(registry.getMetadataMapperParsers(Version.CURRENT).size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT).size())); + Map metadataMapperParsers = + module.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT); Iterator iterator = metadataMapperParsers.keySet().iterator(); - if (version.before(Version.V_7_0_0)) { - assertEquals(AllFieldMapper.NAME, iterator.next()); - } assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; while(iterator.hasNext()) { diff --git a/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java deleted file mode 100644 index c8ae3edb886ec..0000000000000 --- a/server/src/test/java/org/elasticsearch/indices/stats/LegacyIndexStatsIT.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.stats; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; - -public class LegacyIndexStatsIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testFieldDataFieldsParam() { - assertAcked(client() - .admin() - .indices() - .prepareCreate("test1") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_0_0)) - .addMapping("_doc", "bar", "type=text,fielddata=true", "baz", "type=text,fielddata=true") - .get()); - - ensureGreen(); - - client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - client().prepareIndex("test1", "_doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - refresh(); - - client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet(); - - final IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); - - { - final IndicesStatsResponse stats = builder.execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), 
greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields(), is(nullValue())); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("bar").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("bar", "baz").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("*").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); - } - - { - final IndicesStatsResponse stats = builder.setFieldDataFields("*r").execute().actionGet(); - assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); - assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); - } - - } - -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 96d66c9e0c269..f3c1ea7ca529a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -57,15 +56,14 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import 
org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
 import org.elasticsearch.search.aggregations.support.ValueType;
-import org.elasticsearch.test.VersionUtils;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -356,7 +354,7 @@ public void testResetRootDocId() throws Exception {
             fieldType.setName(VALUE_FIELD_NAME);
 
             BooleanQuery.Builder bq = new BooleanQuery.Builder();
-            bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST);
+            bq.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST);
             bq.add(new TermQuery(new Term(IdFieldMapper.NAME, Uid.encodeId("2"))), BooleanClause.Occur.MUST_NOT);
 
             InternalNested nested = search(newSearcher(indexReader, false, true),
@@ -638,7 +636,7 @@ public void testPreGetChildLeafCollectors() throws IOException {
             fieldType2.setHasDocValues(true);
 
             Filter filter = search(newSearcher(indexReader, false, true),
-                Queries.newNonNestedFilter(Version.CURRENT), filterAggregationBuilder, fieldType1, fieldType2);
+                Queries.newNonNestedFilter(), filterAggregationBuilder, fieldType1, fieldType2);
 
             assertEquals("filterAgg", filter.getName());
             assertEquals(3L, filter.getDocCount());
diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
index 990ae8e1f09a2..4342daacd3158 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
@@ -255,4 +255,14 @@ public static Version maxCompatibleVersion(Version version) {
 
     public static Version randomIndexCompatibleVersion(Random random) {
         return randomVersionBetween(random, Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT);
     }
+
+    /**
+     * Returns a random version index compatible with the given version, but not the given version.
+     */
+    public static Version randomPreviousCompatibleVersion(Random random, Version version) {
+        // TODO: change this to minimumCompatibilityVersion(), but first need to remove released/unreleased
+        // versions so getPreviousVersion returns the *actual* previous version.
Otherwise eg 8.0.0 returns say 7.0.2 for previous, + // but 7.2.0 for minimum compat + return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), version); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 0143dad55b0e0..cab910ad6c430 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -76,8 +76,8 @@ public void testRandomVersionBetween() { assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0))); // unbounded upper - got = VersionUtils.randomVersionBetween(random(), fromId(7000099), null); - assertTrue(got.onOrAfter(fromId(7000099))); + got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), null); + assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); assertTrue(got.onOrBefore(Version.CURRENT)); got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null); assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion())); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 7d7263699be88..fac390b6e7692 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -108,9 +108,8 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); - assertThat(testSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_6_2_0)); + assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); + assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.2.0"))); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection)testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index da485a8430e28..48655a61813d8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -148,7 +148,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), - equalTo(Version.V_6_0_0)); + equalTo(Version.fromString("6.0.0"))); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); 
assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index e2d30d0bc2099..bf73f2efba42a 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -118,7 +118,7 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index e5e466a82cc18..2e5081cab34eb 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -37,7 +37,7 @@ public void testSkip() { SkipSection section = new SkipSection("6.0.0 - 6.1.0", randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar"); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.V_6_0_0)); + assertTrue(section.skip(Version.fromString("6.0.0"))); section = new SkipSection(randomBoolean() ? 
null : "6.0.0 - 6.1.0", Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 96bff85389c8a..b2baf40267287 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -75,8 +75,8 @@ public void testParseWithSkip() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_6_0_0)); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(((DoSection)section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index 755d6faef0ba2..da38d1d2903a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -44,11 +43,7 @@ public MachineLearningFeatureSetUsage(StreamInput in) throws IOException { super(in); this.jobsUsage = in.readMap(); this.datafeedsUsage = in.readMap(); - if (in.getVersion().onOrAfter(Version.V_6_5_0)) { - this.nodeCount = in.readInt(); - } else { - this.nodeCount = -1; - } + this.nodeCount = in.readInt(); } @Override @@ -56,9 +51,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(jobsUsage); out.writeMap(datafeedsUsage); - if (out.getVersion().onOrAfter(Version.V_6_5_0)) { - out.writeInt(nodeCount); - } + out.writeInt(nodeCount); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java index 8585e4122e673..1c39c6d985d45 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java @@ -7,8 +7,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -68,14 +66,7 @@ static 
AggProvider fromParsedAggs(AggregatorFactories.Builder parsedAggs) throws } static AggProvider fromStream(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - return new AggProvider(in.readMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException()); - } else if (in.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - return new AggProvider(in.readMap(), null, null); - } else { // only supports eagerly parsed objects - // Upstream, we have read the bool already and know for sure that we have parsed aggs in the stream - return AggProvider.fromParsedAggs(new AggregatorFactories.Builder(in)); - } + return new AggProvider(in.readMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException()); } AggProvider(Map aggs, AggregatorFactories.Builder parsedAggs, Exception parsingException) { @@ -92,29 +83,9 @@ static AggProvider fromStream(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - out.writeMap(aggs); - out.writeOptionalWriteable(parsedAggs); - out.writeException(parsingException); - } else if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - // We allow the lazy parsing nodes that have the bug throw any parsing errors themselves as - // they already have the ability to fully parse the passed Maps - out.writeMap(aggs); - } else { // only supports eagerly parsed objects - if (parsingException != null) { - if (parsingException instanceof IOException) { - throw (IOException) parsingException; - } else { - throw new ElasticsearchException(parsingException); - } - } else if (parsedAggs == null) { - // This is an admittedly rare case but we should fail early instead of writing null when there - // actually are aggregations defined - throw new ElasticsearchException("Unsupported operation: parsed aggregations are null"); - } - // Upstream we already verified that this calling object is not null, no need to write a second boolean to the stream - parsedAggs.writeTo(out); - } + out.writeMap(aggs); + out.writeOptionalWriteable(parsedAggs); + out.writeException(parsingException); } public Exception getParsingException() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java index ff6d2f595af81..755c5a3526d01 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java @@ -7,8 +7,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -74,13 +72,7 @@ static QueryProvider fromParsedQuery(QueryBuilder parsedQuery) throws IOExceptio } static QueryProvider fromStream(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - return new QueryProvider(in.readMap(), in.readOptionalNamedWriteable(QueryBuilder.class), 
in.readException()); - } else if (in.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - return new QueryProvider(in.readMap(), null, null); - } else { // only supports eagerly parsed objects - return QueryProvider.fromParsedQuery(in.readNamedWriteable(QueryBuilder.class)); - } + return new QueryProvider(in.readMap(), in.readOptionalNamedWriteable(QueryBuilder.class), in.readException()); } QueryProvider(Map query, QueryBuilder parsedQuery, Exception parsingException) { @@ -95,28 +87,9 @@ static QueryProvider fromStream(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers - out.writeMap(query); - out.writeOptionalNamedWriteable(parsedQuery); - out.writeException(parsingException); - } else if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects - // We allow the lazy parsing nodes that have the bug throw any parsing errors themselves as - // they already have the ability to fully parse the passed Maps - out.writeMap(query); - } else { // only supports eagerly parsed objects - if (parsingException != null) { // Do we have a parsing error? Throw it - if (parsingException instanceof IOException) { - throw (IOException) parsingException; - } else { - throw new ElasticsearchException(parsingException); - } - } else if (parsedQuery == null) { // Do we have a query defined but not parsed? - // This is an admittedly rare case but we should fail early instead of writing null when there - // actually is a query defined - throw new ElasticsearchException("Unsupported operation: parsed query is null"); - } - out.writeNamedWriteable(parsedQuery); - } + out.writeMap(query); + out.writeOptionalNamedWriteable(parsedQuery); + out.writeException(parsingException); } public Exception getParsingException() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java index 08d754b4e5357..bde94b116c982 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java @@ -139,11 +139,11 @@ private static void buildRoleQuery(User user, ScriptService scriptService, Shard NestedHelper nestedHelper = new NestedHelper(queryShardContext.getMapperService()); if (nestedHelper.mightMatchNestedDocs(roleQuery)) { roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER) - .add(Queries.newNonNestedFilter(queryShardContext.indexVersionCreated()), FILTER).build(); + .add(Queries.newNonNestedFilter(), FILTER).build(); } // If access is allowed on root doc then also access is allowed on all nested docs of that root document: BitSetProducer rootDocs = queryShardContext - .bitsetFilter(Queries.newNonNestedFilter(queryShardContext.indexVersionCreated())); + .bitsetFilter(Queries.newNonNestedFilter()); ToChildBlockJoinQuery includeNestedDocs = new ToChildBlockJoinQuery(roleQuery, rootDocs); filter.add(includeNestedDocs, SHOULD); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java index 
dc87cf744cb98..d544584376f47 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java @@ -5,13 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -29,9 +24,7 @@ import java.util.Collections; import java.util.Map; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; public class AggProviderTests extends AbstractSerializingTestCase { @@ -96,68 +89,6 @@ public void testEmptyAggMap() throws IOException { assertThat(e.getMessage(), equalTo("Datafeed aggregations are not parsable")); } - public void testSerializationBetweenBugVersion() throws IOException { - AggProvider tempAggProvider = createRandomValidAggProvider(); - AggProvider aggProviderWithEx = new AggProvider(tempAggProvider.getAggs(), tempAggProvider.getParsedAggs(), new IOException("ex")); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_6_2); - aggProviderWithEx.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_6_2); - AggProvider streamedAggProvider = AggProvider.fromStream(in); - assertThat(streamedAggProvider.getAggs(), equalTo(aggProviderWithEx.getAggs())); - assertThat(streamedAggProvider.getParsingException(), is(nullValue())); - - AggregatorFactories.Builder streamedParsedAggs = XContentObjectTransformer.aggregatorTransformer(xContentRegistry()) - .fromMap(streamedAggProvider.getAggs()); - assertThat(streamedParsedAggs, equalTo(aggProviderWithEx.getParsedAggs())); - assertThat(streamedAggProvider.getParsedAggs(), is(nullValue())); - } - } - } - - public void testSerializationBetweenEagerVersion() throws IOException { - AggProvider validAggProvider = createRandomValidAggProvider(); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); - validAggProvider.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_0_0); - AggProvider streamedAggProvider = AggProvider.fromStream(in); - assertThat(streamedAggProvider.getAggs(), equalTo(validAggProvider.getAggs())); - assertThat(streamedAggProvider.getParsingException(), is(nullValue())); - assertThat(streamedAggProvider.getParsedAggs(), equalTo(validAggProvider.getParsedAggs())); - } - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - AggProvider aggProviderWithEx = new AggProvider(validAggProvider.getAggs(), - validAggProvider.getParsedAggs(), - new IOException("bad parsing")); - output.setVersion(Version.V_6_0_0); - IOException ex = expectThrows(IOException.class, () -> aggProviderWithEx.writeTo(output)); - assertThat(ex.getMessage(), equalTo("bad parsing")); - } - - try 
(BytesStreamOutput output = new BytesStreamOutput()) { - AggProvider aggProviderWithEx = new AggProvider(validAggProvider.getAggs(), - validAggProvider.getParsedAggs(), - new ElasticsearchException("bad parsing")); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> aggProviderWithEx.writeTo(output)); - assertNotNull(ex.getCause()); - assertThat(ex.getCause().getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - AggProvider aggProviderWithOutParsed = new AggProvider(validAggProvider.getAggs(), null, null); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> aggProviderWithOutParsed.writeTo(output)); - assertThat(ex.getMessage(), equalTo("Unsupported operation: parsed aggregations are null")); - } - } - @Override protected AggProvider mutateInstance(AggProvider instance) throws IOException { Exception parsingException = instance.getParsingException(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 6b664777a2d86..062504cbfdc3c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -762,10 +762,10 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); + output.setVersion(Version.CURRENT); datafeedConfig.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - in.setVersion(Version.V_6_0_0); + in.setVersion(Version.CURRENT); DatafeedConfig streamedDatafeedConfig = new DatafeedConfig(in); assertEquals(datafeedConfig, streamedDatafeedConfig); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index 571c9e81a9068..6aa7487147ca1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -321,10 +321,10 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); + output.setVersion(Version.CURRENT); datafeedUpdate.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - in.setVersion(Version.V_6_0_0); + in.setVersion(Version.CURRENT); DatafeedUpdate streamedDatafeedUpdate = new DatafeedUpdate(in); assertEquals(datafeedUpdate, streamedDatafeedUpdate); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java 
index fb6c2e280d975..8d113aba33579 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java @@ -5,13 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -32,9 +27,7 @@ import java.util.Collections; import java.util.Map; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; public class QueryProviderTests extends AbstractSerializingTestCase { @@ -96,74 +89,6 @@ public void testEmptyQueryMap() throws IOException { assertThat(e.getMessage(), equalTo("Datafeed query is not parsable")); } - public void testSerializationBetweenBugVersion() throws IOException { - QueryProvider tempQueryProvider = createRandomValidQueryProvider(); - QueryProvider queryProviderWithEx = new QueryProvider(tempQueryProvider.getQuery(), - tempQueryProvider.getParsedQuery(), - new IOException("ex")); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_6_2); - queryProviderWithEx.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_6_2); - QueryProvider streamedQueryProvider = QueryProvider.fromStream(in); - assertThat(streamedQueryProvider.getQuery(), equalTo(queryProviderWithEx.getQuery())); - assertThat(streamedQueryProvider.getParsingException(), is(nullValue())); - - QueryBuilder streamedParsedQuery = XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()) - .fromMap(streamedQueryProvider.getQuery()); - assertThat(streamedParsedQuery, equalTo(queryProviderWithEx.getParsedQuery())); - assertThat(streamedQueryProvider.getParsedQuery(), is(nullValue())); - } - } - } - - public void testSerializationBetweenEagerVersion() throws IOException { - QueryProvider validQueryProvider = createRandomValidQueryProvider(); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.V_6_0_0); - validQueryProvider.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.V_6_0_0); - - QueryProvider streamedQueryProvider = QueryProvider.fromStream(in); - XContentObjectTransformer transformer = XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()); - Map sourceQueryMapWithDefaults = transformer.toMap(transformer.fromMap(validQueryProvider.getQuery())); - - assertThat(streamedQueryProvider.getQuery(), equalTo(sourceQueryMapWithDefaults)); - assertThat(streamedQueryProvider.getParsingException(), is(nullValue())); - assertThat(streamedQueryProvider.getParsedQuery(), equalTo(validQueryProvider.getParsedQuery())); - } - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - QueryProvider queryProviderWithEx = new 
QueryProvider(validQueryProvider.getQuery(), - validQueryProvider.getParsedQuery(), - new IOException("bad parsing")); - output.setVersion(Version.V_6_0_0); - IOException ex = expectThrows(IOException.class, () -> queryProviderWithEx.writeTo(output)); - assertThat(ex.getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - QueryProvider queryProviderWithEx = new QueryProvider(validQueryProvider.getQuery(), - validQueryProvider.getParsedQuery(), - new ElasticsearchException("bad parsing")); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> queryProviderWithEx.writeTo(output)); - assertNotNull(ex.getCause()); - assertThat(ex.getCause().getMessage(), equalTo("bad parsing")); - } - - try (BytesStreamOutput output = new BytesStreamOutput()) { - QueryProvider queryProviderWithOutParsed = new QueryProvider(validQueryProvider.getQuery(), null, null); - output.setVersion(Version.V_6_0_0); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> queryProviderWithOutParsed.writeTo(output)); - assertThat(ex.getMessage(), equalTo("Unsupported operation: parsed query is null")); - } - } - @Override protected QueryProvider mutateInstance(QueryProvider instance) throws IOException { Exception parsingException = instance.getParsingException(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java index b784310fdb2a8..ed73ef0562b43 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponseTests.java @@ -5,12 +5,10 @@ */ package org.elasticsearch.xpack.core.security.action.token; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; public class CreateTokenResponseTests extends ESTestCase { @@ -37,56 +35,4 @@ public void testSerialization() throws Exception { } } } - - public void testSerializationToPre62Version() throws Exception { - CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), - randomBoolean() ? null : "FULL", randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 10)); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_1_4); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(version); - response.writeTo(output); - try (StreamInput input = output.bytes().streamInput()) { - input.setVersion(version); - CreateTokenResponse serialized = new CreateTokenResponse(); - serialized.readFrom(input); - assertNull(serialized.getRefreshToken()); - assertEquals(response.getTokenString(), serialized.getTokenString()); - assertEquals(response.getExpiresIn(), serialized.getExpiresIn()); - assertEquals(response.getScope(), serialized.getScope()); - } - } - } - - public void testSerializationToPost62Pre65Version() throws Exception { - CreateTokenResponse response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), - randomBoolean() ? null : "FULL", randomAlphaOfLengthBetween(1, 10)); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.V_6_4_0); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(version); - response.writeTo(output); - try (StreamInput input = output.bytes().streamInput()) { - input.setVersion(version); - CreateTokenResponse serialized = new CreateTokenResponse(); - serialized.readFrom(input); - assertEquals(response, serialized); - } - } - - // no refresh token - response = new CreateTokenResponse(randomAlphaOfLengthBetween(1, 10), TimeValue.timeValueMinutes(20L), - randomBoolean() ? null : "FULL", null); - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(version); - response.writeTo(output); - try (StreamInput input = output.bytes().streamInput()) { - input.setVersion(version); - CreateTokenResponse serialized = new CreateTokenResponse(); - serialized.readFrom(input); - assertEquals("", serialized.getRefreshToken()); - assertEquals(response.getTokenString(), serialized.getTokenString()); - assertEquals(response.getExpiresIn(), serialized.getExpiresIn()); - assertEquals(response.getScope(), serialized.getScope()); - } - } - } } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index b0f5a556ac627..57ded2d069d30 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.List; @@ -19,8 +18,7 @@ public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - Version createdWith = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, - VersionUtils.getPreviousVersion(Version.V_7_0_0)); + Version createdWith = Version.fromString("1.0.0"); IndexMetaData indexMetaData = IndexMetaData.builder("test") .settings(settings(createdWith)) .numberOfShards(1) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index 14e434099f843..a81ee5f86e982 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -125,7 +125,7 @@ private void getForecastRequestStats(String jobId, String forecastId, ActionList } static void validate(Job job, ForecastJobAction.Request request) { - if (job.getJobVersion() == null || job.getJobVersion().before(Version.V_6_1_0)) { + if (job.getJobVersion() == null || job.getJobVersion().before(Version.fromString("6.1.0"))) { throw ExceptionsHelper.badRequestException( "Cannot run forecast because jobs created prior to version 6.1 are not supported"); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index df447d7ec6c35..2de1f79c64aa6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.ToXContent; @@ -29,11 +28,11 @@ import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; -import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -295,21 +294,6 @@ public void testNodeCount() throws Exception { source = new XContentSource(builder); } assertThat(source.getValue("node_count"), equalTo(nodeCount)); - - BytesStreamOutput oldOut = new BytesStreamOutput(); - oldOut.setVersion(Version.V_6_0_0); - usage.writeTo(oldOut); - StreamInput oldInput = oldOut.bytes().streamInput(); - oldInput.setVersion(Version.V_6_0_0); - XPackFeatureSet.Usage oldSerializedUsage = new MachineLearningFeatureSetUsage(oldInput); - - XContentSource oldSource; - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - oldSerializedUsage.toXContent(builder, ToXContent.EMPTY_PARAMS); - oldSource = new XContentSource(builder); - } - - assertNull(oldSource.getValue("node_count")); } public void testUsageGivenMlMetadataNotInstalled() throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java index be115af6bcd72..e60e86cc54960 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportForecastJobActionRequestTests.java @@ -23,7 +23,7 @@ public class TransportForecastJobActionRequestTests extends ESTestCase { public void testValidate_jobVersionCannonBeBefore61() { Job.Builder jobBuilder = createTestJob("forecast-it-test-job-version"); - jobBuilder.setJobVersion(Version.V_6_0_1); + jobBuilder.setJobVersion(Version.fromString("6.0.1")); ForecastJobAction.Request request = new ForecastJobAction.Request(); Exception e = expectThrows(ElasticsearchStatusException.class, () -> TransportForecastJobAction.validate(jobBuilder.build(), request)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 9fc75d0e3385c..7e498efa4df2e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -241,7 +241,7 @@ private Version getDefinedVersion(String username) { case RemoteMonitoringUser.NAME: return RemoteMonitoringUser.DEFINED_SINCE; default: - return Version.V_6_0_0; + return Version.CURRENT.minimumIndexCompatibilityVersion(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java index 78d6e22ac3645..80d7b4b4d00ad 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -77,7 +77,7 @@ public void init() throws Exception { ClusterState state = mock(ClusterState.class); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("id1", buildNewFakeTransportAddress(), Version.CURRENT)) - .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_6_0_0)) + .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.CURRENT.minimumCompatibilityVersion())) .build(); when(state.nodes()).thenReturn(nodes); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index 070ea855800f7..ea1b6483fd795 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.authc.esnative; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.MockSecureSettings; @@ -167,7 +166,6 @@ private void verifySuccessfulAuthentication(boolean enabled) throws Exception { verify(usersStore, times(2)).getReservedUserInfo(eq(principal), any(ActionListener.class)); final ArgumentCaptor predicateCaptor 
= ArgumentCaptor.forClass(Predicate.class); verify(securityIndex, times(2)).checkMappingVersion(predicateCaptor.capture()); - verifyVersionPredicate(principal, predicateCaptor.getValue()); verifyNoMoreInteractions(usersStore); } @@ -186,7 +184,6 @@ public void testLookup() throws Exception { final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - verifyVersionPredicate(principal, predicateCaptor.getValue()); PlainActionFuture future = new PlainActionFuture<>(); reservedRealm.doLookupUser("foobar", future); @@ -234,7 +231,6 @@ public void testLookupThrows() throws Exception { final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - verifyVersionPredicate(principal, predicateCaptor.getValue()); verifyNoMoreInteractions(usersStore); } @@ -448,28 +444,4 @@ public static void mockGetAllReservedUserInfo(NativeUsersStore usersStore, Map versionPredicate) { - switch (principal) { - case LogstashSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); - break; - case BeatsSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_6_2_3), is(false)); - assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); - break; - case APMSystemUser.NAME: - assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); - assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); - break; - case RemoteMonitoringUser.NAME: - assertThat(versionPredicate.test(Version.V_6_4_0), is(false)); - assertThat(versionPredicate.test(Version.V_6_5_0), is(true)); - break; - default: - assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); - break; - } - assertThat(versionPredicate.test(Version.V_7_0_0), is(true)); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 3905bb6d3b4c1..e0ab888f4b4bb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -304,7 +304,7 @@ public void testSerialization() throws Exception { public void testSerializationPreV71() throws Exception { final ExpressionRoleMapping original = randomRoleMapping(false); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_0_2); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(version); original.writeTo(output); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index 9bd69d3eb1a77..be73972f3a1e3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -43,7 +43,6 @@ import static org.hamcrest.Matchers.is; import static 
org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.is; public class IndicesPermissionTests extends ESTestCase { @@ -214,7 +213,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { assertEquals(readIndicesPrivileges, indicesPrivileges.build()); out = new BytesStreamOutput(); - out.setVersion(Version.V_6_0_0); + out.setVersion(Version.CURRENT); indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); indicesPrivileges.grantedFields(allowed); indicesPrivileges.deniedFields(denied); @@ -224,7 +223,7 @@ public void testIndicesPrivilegesStreaming() throws IOException { indicesPrivileges.build().writeTo(out); out.close(); in = out.bytes().streamInput(); - in.setVersion(Version.V_6_0_0); + in.setVersion(Version.CURRENT); RoleDescriptor.IndicesPrivileges readIndicesPrivileges2 = new RoleDescriptor.IndicesPrivileges(in); assertEquals(readIndicesPrivileges, readIndicesPrivileges2); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 3dd5395b1fea0..6e7a9806781b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -5,16 +5,6 @@ */ package org.elasticsearch.xpack.security.support; -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiConsumer; - import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.Action; @@ -52,12 +42,22 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.hamcrest.Matchers; import org.junit.Before; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; + import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; import static org.hamcrest.Matchers.equalTo; @@ -429,10 +429,7 @@ public void testIndexTemplateVersionMatching() throws Exception { assertTrue(SecurityIndexManager.checkTemplateExistsAndVersionMatches( SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7, clusterState, logger, - Version.V_6_0_0::before)); - assertFalse(SecurityIndexManager.checkTemplateExistsAndVersionMatches( - SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7, clusterState, logger, - Version.V_6_0_0::after)); + v -> Version.CURRENT.minimumCompatibilityVersion().before(v))); } public void 
testUpToDateMappingsAreIdentifiedAsUpToDate() throws IOException { diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java index d4ce531cd5f0f..bb8b06287a11c 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java @@ -25,7 +25,7 @@ public class VersionParityTests extends WebServerTestCase { public void testExceptionThrownOnIncompatibleVersions() throws IOException, SQLException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.CURRENT)); + Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT); prepareRequest(version); String url = JdbcConfiguration.URL_PREFIX + webServer().getHostName() + ":" + webServer().getPort(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java index a96c04ab7cd99..58a649de48c02 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; @@ -223,7 +224,8 @@ public void testThatTemplatesExist() { // otherwise a rolling upgrade would not work as expected, when the node has a .watches shard on it public void testThatTemplatesAreAppliedOnNewerNodes() { DiscoveryNode localNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); - DiscoveryNode masterNode = new DiscoveryNode("master", ESTestCase.buildNewFakeTransportAddress(), Version.V_6_0_0); + DiscoveryNode masterNode = new DiscoveryNode("master", ESTestCase.buildNewFakeTransportAddress(), + VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT)); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("master").add(localNode).add(masterNode).build(); ClusterChangedEvent event = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, From 0b94416cc1e1c7b871e4521eb1129dca628acffa Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 13 May 2019 22:17:10 -0400 Subject: [PATCH 090/321] SQL: Add initial geo support (#42031) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds an initial, limited implementation of geo features to SQL. This implementation is based on the [OpenGIS® Implementation Standard for Geographic information - Simple feature access](http://www.opengeospatial.org/standards/sfs), which is the current standard for GIS system implementation. This effort concentrates on the SQL option, AKA ISO 19125-2.
## Queries that are supported as a result of this initial implementation ### Metadata commands - `DESCRIBE table` - returns the correct column type `GEOMETRY` for geo shapes and geo points. - `SHOW FUNCTIONS` - returns a list that includes supported `ST_` functions - `SYS TYPES` and `SYS COLUMNS` display correct types `GEO_SHAPE` and `GEO_POINT` for geo shapes and geo points respectively. ### Returning geoshapes and geopoints from elasticsearch - `SELECT geom FROM table` - returns the geoshapes and geo_points as libs/geo objects in JDBC or as WKT strings in the console. - `SELECT ST_AsWKT(geom) FROM table;` and `SELECT ST_AsText(geom) FROM table;` - return the geoshapes and geopoints in their WKT representation; ### Using geopoints in elasticsearch - The following functions will be supported for geopoints in queries, sorting and aggregations: `ST_GeomFromText`, `ST_X`, `ST_Y`, `ST_Z`, `ST_GeometryType`, and `ST_Distance`. In most cases when used in queries, sorting and aggregations, these functions are translated into scripts. These functions can be used in the SELECT clause for both geopoints and geoshapes. - `SELECT * FROM table WHERE ST_Distance(ST_GeomFromText('POINT (1 2)'), point) < 10;` - returns all records for which `point` is located within 10m of `POINT (1 2)`. In this case the WHERE clause is translated into a range query. ## Limitations: Geoshapes cannot be used in queries, sorting and aggregations as part of this initial effort. In order to fully take advantage of geoshapes we would need to have access to geoshape doc values, which is coming in #37206. `ST_Z` cannot be used on geopoints in queries, sorting and aggregations since we don't store altitude in geo_point doc values. Relates to #29872 --- docs/reference/sql/functions/geo.asciidoc | 192 +++ docs/reference/sql/functions/index.asciidoc | 9 + .../sql/language/data-types.asciidoc | 2 + docs/reference/sql/limitations.asciidoc | 11 + .../common/geo/parsers/ShapeParser.java | 22 + x-pack/plugin/sql/build.gradle | 1 + x-pack/plugin/sql/jdbc/build.gradle | 3 + .../elasticsearch/xpack/sql/jdbc/EsType.java | 4 +- .../xpack/sql/jdbc/ExtraTypes.java | 1 + .../xpack/sql/jdbc/JdbcColumnInfo.java | 3 +- .../xpack/sql/jdbc/JdbcConfiguration.java | 4 +- .../xpack/sql/jdbc/JdbcPreparedStatement.java | 19 +- .../xpack/sql/jdbc/TypeConverter.java | 11 + .../xpack/sql/jdbc/TypeUtils.java | 2 + x-pack/plugin/sql/qa/build.gradle | 17 +- .../sql/qa/multi_node/GeoJdbcCsvSpecIT.java | 16 + .../sql/qa/multi_node/GeoJdbcSqlSpecIT.java | 15 + .../sql/qa/single_node/GeoJdbcCsvSpecIT.java | 31 + .../sql/qa/single_node/GeoJdbcSqlSpecIT.java | 15 + .../xpack/sql/qa/geo/GeoCsvSpecTestCase.java | 79 ++ .../xpack/sql/qa/geo/GeoDataLoader.java | 158 ++++ .../xpack/sql/qa/geo/GeoSqlSpecTestCase.java | 94 +++ .../xpack/sql/qa/jdbc/CsvTestUtils.java | 16 +- .../xpack/sql/qa/jdbc/DataLoader.java | 4 +- .../xpack/sql/qa/jdbc/JdbcAssert.java | 33 + .../xpack/sql/qa/jdbc/LocalH2.java | 2 +- .../qa/src/main/resources/command.csv-spec | 11 +- .../qa/src/main/resources/docs/docs.csv-spec | 14 +- .../qa/src/main/resources/docs/geo.csv-spec | 79 ++ .../sql/qa/src/main/resources/geo/geo.csv | 16 + .../src/main/resources/geo/geosql-bulk.json | 33 + .../qa/src/main/resources/geo/geosql.csv-spec | 288 ++++++++ .../sql/qa/src/main/resources/geo/geosql.json | 28 + .../qa/src/main/resources/geo/geosql.sql-spec | 24 + .../src/main/resources/geo/setup_test_geo.sql | 9 + .../qa/src/main/resources/ogc/OGC-NOTICE.txt | 41 ++ .../qa/src/main/resources/ogc/ogc.csv-spec | 36 +
.../sql/qa/src/main/resources/ogc/ogc.json | 58 ++ .../qa/src/main/resources/ogc/ogc.sql-spec | 85 +++ .../sql/qa/src/main/resources/ogc/sqltsch.sql | 672 ++++++++++++++++++ .../single-node-only/command-sys-geo.csv-spec | 15 + .../xpack/sql/analysis/analyzer/Verifier.java | 32 +- .../search/extractor/FieldHitExtractor.java | 45 +- .../xpack/sql/expression/TypeResolutions.java | 5 + .../expression/function/FunctionRegistry.java | 19 + .../function/scalar/Processors.java | 7 + .../function/scalar/geo/GeoProcessor.java | 97 +++ .../function/scalar/geo/GeoShape.java | 222 ++++++ .../function/scalar/geo/StAswkt.java | 45 ++ .../function/scalar/geo/StDistance.java | 74 ++ .../scalar/geo/StDistanceFunction.java | 27 + .../function/scalar/geo/StDistancePipe.java | 56 ++ .../scalar/geo/StDistanceProcessor.java | 87 +++ .../function/scalar/geo/StGeometryType.java | 45 ++ .../function/scalar/geo/StWkttosql.java | 67 ++ .../scalar/geo/StWkttosqlProcessor.java | 76 ++ .../expression/function/scalar/geo/StX.java | 45 ++ .../expression/function/scalar/geo/StY.java | 45 ++ .../expression/function/scalar/geo/StZ.java | 45 ++ .../function/scalar/geo/UnaryGeoFunction.java | 84 +++ .../whitelist/InternalSqlScriptUtils.java | 75 +- .../expression/gen/script/ScriptWeaver.java | 8 + .../sql/expression/literal/Intervals.java | 1 - .../sql/expression/literal/Literals.java | 2 + .../xpack/sql/planner/QueryTranslator.java | 26 + .../sql/querydsl/query/GeoDistanceQuery.java | 77 ++ .../xpack/sql/type/DataType.java | 7 + .../xpack/sql/type/DataTypes.java | 4 + .../xpack/sql/type/ExtTypes.java | 3 +- .../xpack/sql/plugin/sql_whitelist.txt | 23 +- .../analyzer/FieldAttributeTests.java | 2 +- .../analyzer/VerifierErrorMessagesTests.java | 24 + .../extractor/FieldHitExtractorTests.java | 134 ++++ .../scalar/geo/GeoProcessorTests.java | 106 +++ .../scalar/geo/StDistanceProcessorTests.java | 66 ++ .../scalar/geo/StWkttosqlProcessorTests.java | 42 ++ .../xpack/sql/optimizer/OptimizerTests.java | 10 + .../logical/command/sys/SysColumnsTests.java | 16 +- .../logical/command/sys/SysTypesTests.java | 2 +- .../sql/planner/QueryTranslatorTests.java | 107 ++- .../xpack/sql/type/TypesTests.java | 7 +- .../sql/src/test/resources/mapping-geo.json | 3 + .../mapping-multi-field-variation.json | 4 +- .../mapping-multi-field-with-nested.json | 1 + 84 files changed, 3976 insertions(+), 70 deletions(-) create mode 100644 docs/reference/sql/functions/geo.asciidoc create mode 100644 x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcCsvSpecIT.java create mode 100644 x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/GeoJdbcSqlSpecIT.java create mode 100644 x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcCsvSpecIT.java create mode 100644 x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java create mode 100644 x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java create mode 100644 x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java create mode 100644 x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java create mode 100644 x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json create mode 
100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec create mode 100644 x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql create mode 100644 x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc new file mode 100644 index 0000000000000..f5ed716eaeb29 --- /dev/null +++ b/docs/reference/sql/functions/geo.asciidoc @@ -0,0 +1,192 @@ +[role="xpack"] +[testenv="basic"] +[[sql-functions-geo]] +=== Geo Functions + +The geo functions work with geometries stored in `geo_point` and `geo_shape` fields, or returned by other geo functions. 
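Geometries returned by one geo function can be fed directly into another. As a minimal sketch of this composition (the query and expected result below simply mirror the `docs/geo.csv-spec` examples added later in this patch):

["source", "sql"]
--------------------------------------------------
-- the geometry produced by ST_WKTToSQL is passed straight into ST_Distance
SELECT ST_Distance(ST_WKTToSQL('POINT (10 20)'), ST_WKTToSQL('POINT (20 30)')) distance;

    distance:d
1499101.2889383635
--------------------------------------------------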
+ +==== Limitations + +Both <<geo-point,geo_point>> and <<geo-shape,geo_shape>> types are represented in SQL as geometry and can be used +interchangeably with the following exceptions: + +* `geo_shape` fields don't have doc values, therefore these fields cannot be used for filtering, grouping or sorting. + +* `geo_point` fields are indexed and have doc values by default; however, only latitude and longitude are stored and + indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and + 8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed. + Therefore calling the `ST_Z` function in filtering, grouping or sorting will return `null`. + +==== Geometry Conversion + +[[sql-functions-geo-st-as-wkt]] +===== `ST_AsWKT` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_AsWKT(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: string + +.Description: + +Returns the WKT representation of the `geometry`. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] +-------------------------------------------------- + + +[[sql-functions-geo-st-wkt-to-sql]] +===== `ST_WKTToSQL` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_WKTToSQL(string<1>) +-------------------------------------------------- + +*Input*: + +<1> string WKT representation of geometry + +*Output*: geometry + +.Description: + +Returns the geometry from its WKT representation. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[wkttosql] +-------------------------------------------------- + +==== Geometry Properties + +[[sql-functions-geo-st-geometrytype]] +===== `ST_GeometryType` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_GeometryType(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: string + +.Description: + +Returns the type of the `geometry` such as POINT, MULTIPOINT, LINESTRING, MULTILINESTRING, POLYGON, MULTIPOLYGON, GEOMETRYCOLLECTION, ENVELOPE or CIRCLE. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[geometrytype] +-------------------------------------------------- + +[[sql-functions-geo-st-x]] +===== `ST_X` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_X(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the longitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[x] +-------------------------------------------------- + +[[sql-functions-geo-st-y]] +===== `ST_Y` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Y(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the latitude of the first point in the geometry.
+ +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[y] +-------------------------------------------------- + +[[sql-functions-geo-st-z]] +===== `ST_Z` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Z(geometry<1>) +-------------------------------------------------- + +*Input*: + +<1> geometry + +*Output*: double + +.Description: + +Returns the altitude of the first point in the geometry. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[z] +-------------------------------------------------- + +[[sql-functions-geo-st-distance]] +===== `ST_Distance` + +.Synopsis: +[source, sql] +-------------------------------------------------- +ST_Distance(geometry<1>, geometry<2>) +-------------------------------------------------- + +*Input*: + +<1> source geometry +<2> target geometry + +*Output*: double + +.Description: + +Returns the distance between geometries in meters. Both geometries have to be points. + +["source","sql",subs="attributes,macros"] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/geo.csv-spec[distance] +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 382adeecea4ed..248c47452bab4 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -136,6 +136,14 @@ ** <> ** <> ** <> +* <<sql-functions-geo>> +** <<sql-functions-geo-st-as-wkt>> +** <<sql-functions-geo-st-wkt-to-sql>> +** <<sql-functions-geo-st-geometrytype>> +** <<sql-functions-geo-st-x>> +** <<sql-functions-geo-st-y>> +** <<sql-functions-geo-st-z>> +** <<sql-functions-geo-st-distance>> * <> ** <> ** <> @@ -149,5 +157,6 @@ include::search.asciidoc[] include::math.asciidoc[] include::string.asciidoc[] include::type-conversion.asciidoc[] +include::geo.asciidoc[] include::conditional.asciidoc[] include::system.asciidoc[] diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index 8db4c88f3a11b..ad9b2a320c0c6 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -81,6 +81,8 @@ s|SQL precision | interval_hour_to_minute | 23 | interval_hour_to_second | 23 | interval_minute_to_second | 23 +| geo_point | 52 +| geo_shape | 2,147,483,647 |=== diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index b9c59e31b3d6f..c5b334480c993 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -150,3 +150,14 @@ SELECT count(*) FROM test GROUP BY MINUTE((CAST(date_created AS TIME)); ------------------------------------------------------------- SELECT HISTOGRAM(CAST(birth_date AS TIME), INTERVAL '10' MINUTES) as h, COUNT(*) FROM t GROUP BY h ------------------------------------------------------------- + +[float] +[[geo-sql-limitations]] +=== Geo-related functions + +Since `geo_shape` fields don't have doc values, these fields cannot be used for filtering, grouping or sorting. + +By default, `geo_point` fields are indexed and have doc values. However, only latitude and longitude are stored and +indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and +8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed. +Therefore calling the `ST_Z` function in filtering, grouping or sorting will return `null`.
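To make the `ST_Z` limitation concrete, a short sketch against the `geo` test index added by this patch (the first query mirrors the `docs/geo.csv-spec` example; the second is a hypothetical filter shown only to illustrate the `null` behavior):

["source", "sql"]
--------------------------------------------------
-- the altitude of a WKT literal is available in the SELECT clause
SELECT ST_Z(ST_WKTToSQL('POINT (10 20 30)')) z;   -- 30.0

-- hypothetical filter on a geo_point column: geo_point doc values carry no
-- altitude, so ST_Z evaluates to null and the comparison matches no rows
SELECT city FROM geo WHERE ST_Z(location) > 0;
--------------------------------------------------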
diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 21d1bd9f25564..9299edc459cb7 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -20,12 +20,18 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; import java.io.IOException; +import java.io.InputStream; /** * first point of entry for a shape parser @@ -67,4 +73,20 @@ static ShapeBuilder parse(XContentParser parser, BaseGeoShapeFieldMapper shapeMa static ShapeBuilder parse(XContentParser parser) throws IOException { return parse(parser, null); } + + static ShapeBuilder parse(Object value) throws IOException { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return parse(parser); + } + } } diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 14d80ab50ee3f..1d13df3b2c32e 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -16,6 +16,7 @@ ext { // SQL test dependency versions csvjdbcVersion="1.0.34" h2Version="1.4.197" + h2gisVersion="1.5.0" } configurations { diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 9a15bcf29c0a1..37e0baf00aa71 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -21,6 +21,9 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } + compile (project(':libs:elasticsearch-geo')) { + transitive = false + } compile project(':libs:core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java index 52aff352ac182..51a03dad70b55 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/EsType.java @@ -44,7 +44,9 @@ public enum EsType implements SQLType { INTERVAL_DAY_TO_SECOND( ExtraTypes.INTERVAL_DAY_SECOND), INTERVAL_HOUR_TO_MINUTE( ExtraTypes.INTERVAL_HOUR_MINUTE), INTERVAL_HOUR_TO_SECOND( ExtraTypes.INTERVAL_HOUR_SECOND), - INTERVAL_MINUTE_TO_SECOND(ExtraTypes.INTERVAL_MINUTE_SECOND); + INTERVAL_MINUTE_TO_SECOND(ExtraTypes.INTERVAL_MINUTE_SECOND), + GEO_POINT( 
ExtraTypes.GEOMETRY), + GEO_SHAPE( ExtraTypes.GEOMETRY); private final Integer type; diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java index 3df70f8e1d956..b8f09ece2f3be 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/ExtraTypes.java @@ -29,5 +29,6 @@ private ExtraTypes() {} static final int INTERVAL_HOUR_MINUTE = 111; static final int INTERVAL_HOUR_SECOND = 112; static final int INTERVAL_MINUTE_SECOND = 113; + static final int GEOMETRY = 114; } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java index 9b1ff87596798..5f2f0773ff17a 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.sql.jdbc; import java.util.Objects; @@ -89,4 +90,4 @@ public boolean equals(Object obj) { public int hashCode() { return Objects.hash(name, type, table, catalog, schema, label, displaySize); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java index 1c216d8dba7c7..c9480dbcb1c2b 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java @@ -35,7 +35,7 @@ / Additional properties can be specified either through the Properties object or in the URL. In case of duplicates, the URL wins. */ //TODO: beef this up for Security/SSL -class JdbcConfiguration extends ConnectionConfiguration { +public class JdbcConfiguration extends ConnectionConfiguration { static final String URL_PREFIX = "jdbc:es://"; public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); @@ -47,7 +47,7 @@ class JdbcConfiguration extends ConnectionConfiguration { // can be out/err/url static final String DEBUG_OUTPUT_DEFAULT = "err"; - static final String TIME_ZONE = "timezone"; + public static final String TIME_ZONE = "timezone"; // follow the JDBC spec and use the JVM default... 
// to avoid inconsistency, the default is picked up once at startup and reused across connections // to cater to the principle of least surprise diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java index 041c457d91b3d..39d942362d731 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcPreparedStatement.java @@ -190,7 +190,7 @@ public void setObject(int parameterIndex, Object x) throws SQLException { setParam(parameterIndex, null, EsType.NULL); return; } - + // check also here the unsupported types so that any unsupported interfaces ({@code java.sql.Struct}, // {@code java.sql.Array} etc) will generate the correct exception message. Otherwise, the method call // {@code TypeConverter.fromJavaToJDBC(x.getClass())} will report the implementing class as not being supported. @@ -330,7 +330,7 @@ public void setNClob(int parameterIndex, Reader reader, long length) throws SQLE public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { setObject(parameterIndex, xmlObject); } - + @Override public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { setObject(parameterIndex, x, TypeUtils.asSqlType(targetSqlType), scaleOrLength); @@ -343,13 +343,12 @@ public void setObject(int parameterIndex, Object x, SQLType targetSqlType, int s private void setObject(int parameterIndex, Object x, EsType dataType, String typeString) throws SQLException { checkOpen(); - // set the null value on the type and exit if (x == null) { setParam(parameterIndex, null, dataType); return; } - + checkKnownUnsupportedTypes(x); if (x instanceof byte[]) { if (dataType != EsType.BINARY) { @@ -359,7 +358,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ setParam(parameterIndex, x, EsType.BINARY); return; } - + if (x instanceof Timestamp || x instanceof Calendar || x instanceof Date @@ -380,7 +379,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ LocalDateTime ldt = (LocalDateTime) x; Calendar cal = getDefaultCalendar(); cal.set(ldt.getYear(), ldt.getMonthValue() - 1, ldt.getDayOfMonth(), ldt.getHour(), ldt.getMinute(), ldt.getSecond()); - + dateToSet = cal.getTime(); } else if (x instanceof Time) { dateToSet = new java.util.Date(((Time) x).getTime()); @@ -398,7 +397,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ throw new SQLFeatureNotSupportedException( "Conversion from type [" + x.getClass().getName() + "] to [" + typeString + "] not supported"); } - + if (x instanceof Boolean || x instanceof Byte || x instanceof Short @@ -412,7 +411,7 @@ private void setObject(int parameterIndex, Object x, EsType dataType, String typ dataType); return; } - + throw new SQLFeatureNotSupportedException( "Conversion from type [" + x.getClass().getName() + "] to [" + typeString + "] not supported"); } @@ -421,14 +420,14 @@ private void checkKnownUnsupportedTypes(Object x) throws SQLFeatureNotSupportedE List> unsupportedTypes = new ArrayList<>(Arrays.asList(Struct.class, Array.class, SQLXML.class, RowId.class, Ref.class, Blob.class, NClob.class, Clob.class, LocalDate.class, LocalTime.class, OffsetTime.class, OffsetDateTime.class, URL.class, BigDecimal.class)); - + for (Class 
clazz:unsupportedTypes) { if (clazz.isAssignableFrom(x.getClass())) { throw new SQLFeatureNotSupportedException("Objects of type [" + clazz.getName() + "] are not supported"); } } } - + private Calendar getDefaultCalendar() { return Calendar.getInstance(cfg.timeZone(), Locale.ROOT); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 9c30241ccbdb1..7e21f2206b1e9 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -5,13 +5,16 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.proto.StringUtils; +import java.io.IOException; import java.sql.Date; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.Time; import java.sql.Timestamp; +import java.text.ParseException; import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; @@ -100,6 +103,7 @@ private static T dateTimeConvert(Long millis, Calendar c, Function readScriptSpec() throws Exception { + List list = new ArrayList<>(); + list.addAll(GeoCsvSpecTestCase.readScriptSpec()); + list.addAll(readScriptSpec("/single-node-only/command-sys-geo.csv-spec", specParser())); + return list; + } + + public GeoJdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java new file mode 100644 index 0000000000000..2a9a1592c71d0 --- /dev/null +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/GeoJdbcSqlSpecIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.qa.single_node; + +import org.elasticsearch.xpack.sql.qa.geo.GeoSqlSpecTestCase; + +public class GeoJdbcSqlSpecIT extends GeoSqlSpecTestCase { + public GeoJdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java new file mode 100644 index 0000000000000..e40e6de9e3a9c --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoCsvSpecTestCase.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.geo; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.jdbc.JdbcConfiguration; +import org.junit.Before; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + +/** + * Tests comparing sql queries executed against our jdbc client + * with hard coded result sets. + */ +public abstract class GeoCsvSpecTestCase extends SpecBaseIntegrationTestCase { + private final CsvTestCase testCase; + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/ogc/ogc.csv-spec", parser)); + tests.addAll(readScriptSpec("/geo/geosql.csv-spec", parser)); + tests.addAll(readScriptSpec("/docs/geo.csv-spec", parser)); + return tests; + } + + public GeoCsvSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + + @Before + public void setupTestGeoDataIfNeeded() throws Exception { + if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); + } + if (client().performRequest(new Request("HEAD", "/geo")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadGeoDatasetIntoEs(client(), "geo"); + } + } + + @Override + protected final void doTest() throws Throwable { + try (Connection csv = csvConnection(testCase); + Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = executeCsvQuery(csv, testName); + ResultSet elasticResults = executeJdbcQuery(es, testCase.query); + assertResults(expected, elasticResults); + } + } + + // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } + +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java new file mode 100644 index 0000000000000..40e8f64be87cc --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoDataLoader.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.geo; + +import org.apache.http.HttpHost; +import org.apache.http.HttpStatus; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +import static org.elasticsearch.xpack.sql.qa.jdbc.DataLoader.createString; +import static org.elasticsearch.xpack.sql.qa.jdbc.DataLoader.readFromJarUrl; + +public class GeoDataLoader { + + public static void main(String[] args) throws Exception { + try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) { + loadOGCDatasetIntoEs(client, "ogc"); + loadGeoDatasetIntoEs(client, "geo"); + Loggers.getLogger(GeoDataLoader.class).info("Geo data loaded"); + } + } + + protected static void loadOGCDatasetIntoEs(RestClient client, String index) throws Exception { + createIndex(client, index, createOGCIndexRequest()); + loadData(client, index, readResource("/ogc/ogc.json")); + makeFilteredAlias(client, "lakes", index, "\"term\" : { \"ogc_type\" : \"lakes\" }"); + makeFilteredAlias(client, "road_segments", index, "\"term\" : { \"ogc_type\" : \"road_segments\" }"); + makeFilteredAlias(client, "divided_routes", index, "\"term\" : { \"ogc_type\" : \"divided_routes\" }"); + makeFilteredAlias(client, "forests", index, "\"term\" : { \"ogc_type\" : \"forests\" }"); + makeFilteredAlias(client, "bridges", index, "\"term\" : { \"ogc_type\" : \"bridges\" }"); + makeFilteredAlias(client, "streams", index, "\"term\" : { \"ogc_type\" : \"streams\" }"); + makeFilteredAlias(client, "buildings", index, "\"term\" : { \"ogc_type\" : \"buildings\" }"); + makeFilteredAlias(client, "ponds", index, "\"term\" : { \"ogc_type\" : \"ponds\" }"); + makeFilteredAlias(client, "named_places", index, "\"term\" : { \"ogc_type\" : \"named_places\" }"); + makeFilteredAlias(client, "map_neatlines", index, "\"term\" : { \"ogc_type\" : \"map_neatlines\" }"); + } + + private static String createOGCIndexRequest() throws Exception { + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("properties"); + { + // Common + createIndex.startObject("ogc_type").field("type", "keyword").endObject(); + createIndex.startObject("fid").field("type", "integer").endObject(); + createString("name", createIndex); + + // Type specific + createIndex.startObject("shore").field("type", "geo_shape").endObject(); // lakes + + createString("aliases", createIndex); // road_segments + createIndex.startObject("num_lanes").field("type", "integer").endObject(); // road_segments, divided_routes + createIndex.startObject("centerline").field("type", "geo_shape").endObject(); // road_segments, streams + + 
createIndex.startObject("centerlines").field("type", "geo_shape").endObject(); // divided_routes + + createIndex.startObject("boundary").field("type", "geo_shape").endObject(); // forests, named_places + + createIndex.startObject("position").field("type", "geo_shape").endObject(); // bridges, buildings + + createString("address", createIndex); // buildings + createIndex.startObject("footprint").field("type", "geo_shape").endObject(); // buildings + + createIndex.startObject("type").field("type", "keyword").endObject(); // ponds + createIndex.startObject("shores").field("type", "geo_shape").endObject(); // ponds + + createIndex.startObject("neatline").field("type", "geo_shape").endObject(); // map_neatlines + + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + return Strings.toString(createIndex); + } + + private static void createIndex(RestClient client, String index, String settingsMappings) throws IOException { + Request createIndexRequest = new Request("PUT", "/" + index); + createIndexRequest.setEntity(new StringEntity(settingsMappings, ContentType.APPLICATION_JSON)); + client.performRequest(createIndexRequest); + } + + static void loadGeoDatasetIntoEs(RestClient client, String index) throws Exception { + createIndex(client, index, readResource("/geo/geosql.json")); + loadData(client, index, readResource("/geo/geosql-bulk.json")); + } + + private static void loadData(RestClient client, String index, String bulk) throws IOException { + Request request = new Request("POST", "/" + index + "/_bulk"); + request.addParameter("refresh", "true"); + request.setJsonEntity(bulk); + Response response = client.performRequest(request); + + if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { + throw new RuntimeException("Cannot load data " + response.getStatusLine()); + } + + String bulkResponseStr = EntityUtils.toString(response.getEntity()); + Map bulkResponseMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, bulkResponseStr, false); + + if ((boolean) bulkResponseMap.get("errors")) { + throw new RuntimeException("Failed to load bulk data " + bulkResponseStr); + } + } + + + public static void makeFilteredAlias(RestClient client, String aliasName, String index, String filter) throws Exception { + Request request = new Request("POST", "/" + index + "/_alias/" + aliasName); + request.setJsonEntity("{\"filter\" : { " + filter + " } }"); + client.performRequest(request); + } + + private static String readResource(String location) throws IOException { + URL dataSet = SqlSpecTestCase.class.getResource(location); + if (dataSet == null) { + throw new IllegalArgumentException("Can't find [" + location + "]"); + } + StringBuilder builder = new StringBuilder(); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(dataSet), StandardCharsets.UTF_8))) { + String line = reader.readLine(); + while(line != null) { + if (line.trim().startsWith("//") == false) { + builder.append(line); + builder.append('\n'); + } + line = reader.readLine(); + } + return builder.toString(); + } + } + +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java new file mode 100644 index 0000000000000..405efac5cac35 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.qa.geo; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.sql.qa.jdbc.LocalH2; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.jdbc.JdbcConfiguration; +import org.h2gis.functions.factory.H2GISFunctions; +import org.junit.Before; +import org.junit.ClassRule; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Properties; + +/** + * Tests comparing geo sql queries executed against our jdbc client + * with those executed against H2GIS's jdbc client. + */ +public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { + private String query; + + @ClassRule + public static LocalH2 H2 = new LocalH2((c) -> { + // Load GIS extensions + H2GISFunctions.load(c); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); + c.createStatement().execute("RUNSCRIPT FROM 'classpath:/geo/setup_test_geo.sql'"); + }); + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = new SqlSpecParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/ogc/ogc.sql-spec", parser)); + tests.addAll(readScriptSpec("/geo/geosql.sql-spec", parser)); + return tests; + } + + @Before + public void setupTestGeoDataIfNeeded() throws Exception { + assumeTrue("Cannot support locales that don't use Hindu-Arabic numerals and non-ascii - sign due to H2", + "-42".equals(NumberFormat.getInstance(Locale.getDefault()).format(-42))); + if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); + } + if (client().performRequest(new Request("HEAD", "/geo")).getStatusLine().getStatusCode() == 404) { + GeoDataLoader.loadGeoDatasetIntoEs(client(), "geo"); + } + } + + + private static class SqlSpecParser implements Parser { + @Override + public Object parse(String line) { + return line.endsWith(";") ? 
line.substring(0, line.length() - 1) : line; + } + } + + public GeoSqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber); + this.query = query; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection h2 = H2.get(); + Connection es = esJdbc()) { + + ResultSet expected, elasticResults; + expected = executeJdbcQuery(h2, query); + elasticResults = executeJdbcQuery(es, query); + + assertResults(expected, elasticResults); + } + } + + // TODO: use UTC for now until deciding on a strategy for handling date extraction + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java index 6376bd13308d6..daa4e5b4d0c87 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java @@ -46,7 +46,7 @@ private CsvTestUtils() { */ public static ResultSet executeCsvQuery(Connection csv, String csvTableName) throws SQLException { ResultSet expected = csv.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY) - .executeQuery("SELECT * FROM " + csvTableName); + .executeQuery("SELECT * FROM " + csvTableName); // trigger data loading for type inference expected.beforeFirst(); return expected; @@ -187,13 +187,13 @@ public Object parse(String line) { } else { if (line.endsWith(";")) { - // pick up the query - testCase = new CsvTestCase(); - query.append(line.substring(0, line.length() - 1).trim()); - testCase.query = query.toString(); - testCase.earlySchema = earlySchema.toString(); - earlySchema.setLength(0); - query.setLength(0); + // pick up the query + testCase = new CsvTestCase(); + query.append(line.substring(0, line.length() - 1).trim()); + testCase.query = query.toString(); + testCase.earlySchema = earlySchema.toString(); + earlySchema.setLength(0); + query.setLength(0); } // keep reading the query else { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index 774a406da863c..ff50a33a0afe8 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -63,7 +63,7 @@ public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { freeze(client, "archive"); } - private static void createString(String name, XContentBuilder builder) throws Exception { + public static void createString(String name, XContentBuilder builder) throws Exception { builder.startObject(name).field("type", "text") .startObject("fields") .startObject("keyword").field("type", "keyword").endObject() @@ -292,7 +292,7 @@ protected static void loadLibDatasetIntoEs(RestClient client, String index) thro Response response = client.performRequest(request); } - protected static void makeAlias(RestClient client, String aliasName, String... 
indices) throws Exception { + public static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception { for (String index : indices) { client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName)); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 8931fe0264e9d..76894fc5a53d5 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -8,18 +8,25 @@ import com.carrotsearch.hppc.IntObjectHashMap; import org.apache.logging.log4j.Logger; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.xpack.sql.jdbc.EsType; import org.elasticsearch.xpack.sql.proto.StringUtils; import org.relique.jdbc.csv.CsvResultSet; +import java.io.IOException; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Types; +import java.text.ParseException; import java.time.temporal.TemporalAmount; import java.util.ArrayList; +import java.util.Calendar; import java.util.List; import java.util.Locale; +import java.util.TimeZone; import static java.lang.String.format; import static java.sql.Types.BIGINT; @@ -29,6 +36,8 @@ import static java.sql.Types.REAL; import static java.sql.Types.SMALLINT; import static java.sql.Types.TINYINT; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -38,6 +47,7 @@ * Utility class for doing JUnit-style asserts over JDBC. */ public class JdbcAssert { + private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT); private static final IntObjectHashMap SQL_TO_TYPE = new IntObjectHashMap<>(); @@ -139,6 +149,11 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, expectedType = Types.TIMESTAMP; } + // H2 treats GEOMETRY as OTHER + if (expectedType == Types.OTHER && nameOf(actualType).startsWith("GEO_") ) { + actualType = Types.OTHER; + } + // since csv doesn't support real, we use float instead..... if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { expectedType = Types.REAL; @@ -251,6 +266,24 @@ else if (type == Types.DOUBLE) { assertEquals(msg, (double) expectedObject, (double) actualObject, lenientFloatingNumbers ? 1d : 0.0d); } else if (type == Types.FLOAT) { assertEquals(msg, (float) expectedObject, (float) actualObject, lenientFloatingNumbers ? 
1f : 0.0f); + } else if (type == Types.OTHER) { + if (actualObject instanceof Geometry) { + // We need to convert the expected object to libs/geo Geometry for comparison + try { + expectedObject = WellKnownText.fromWKT(expectedObject.toString()); + } catch (IOException | ParseException ex) { + fail(ex.getMessage()); + } + } + if (actualObject instanceof Point) { + // geo points are loaded from doc values where they are stored as long-encoded values leading + // to a loss of precision + assertThat(expectedObject, instanceOf(Point.class)); + assertEquals(((Point) expectedObject).getLat(), ((Point) actualObject).getLat(), 0.000001d); + assertEquals(((Point) expectedObject).getLon(), ((Point) actualObject).getLon(), 0.000001d); + } else { + assertEquals(msg, expectedObject, actualObject); + } } // intervals else if (type == Types.VARCHAR && actualObject instanceof TemporalAmount) { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java index e6295985cf519..2f3ce7eaddd88 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java @@ -81,4 +81,4 @@ protected void after() { public Connection get() throws SQLException { return DriverManager.getConnection(url, DEFAULTS); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec index 9f63de97c9928..073788511d0f0 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec @@ -129,11 +129,20 @@ RIGHT |SCALAR RTRIM |SCALAR SPACE |SCALAR SUBSTRING |SCALAR -UCASE |SCALAR +UCASE |SCALAR CAST |SCALAR CONVERT |SCALAR DATABASE |SCALAR USER |SCALAR +ST_ASTEXT |SCALAR +ST_ASWKT |SCALAR +ST_DISTANCE |SCALAR +ST_GEOMETRYTYPE |SCALAR +ST_GEOMFROMTEXT |SCALAR +ST_WKTTOSQL |SCALAR +ST_X |SCALAR +ST_Y |SCALAR +ST_Z |SCALAR SCORE |SCORE ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index c2432007bff35..936c7eef88191 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -201,7 +201,7 @@ showFunctions // tag::showFunctions SHOW FUNCTIONS; - name | type + name | type -----------------+--------------- AVG |AGGREGATE COUNT |AGGREGATE @@ -325,13 +325,21 @@ RIGHT |SCALAR RTRIM |SCALAR SPACE |SCALAR SUBSTRING |SCALAR -UCASE |SCALAR +UCASE |SCALAR CAST |SCALAR CONVERT |SCALAR DATABASE |SCALAR USER |SCALAR +ST_ASTEXT |SCALAR +ST_ASWKT |SCALAR +ST_DISTANCE |SCALAR +ST_GEOMETRYTYPE |SCALAR +ST_GEOMFROMTEXT |SCALAR +ST_WKTTOSQL |SCALAR +ST_X |SCALAR +ST_Y |SCALAR +ST_Z |SCALAR SCORE |SCORE - // end::showFunctions ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec new file mode 100644 index 0000000000000..60fbebfc13950 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/geo.csv-spec @@ -0,0 +1,79 @@ +// +// CSV spec used by the geo docs +// + +/////////////////////////////// +// +// ST_AsWKT() +// +/////////////////////////////// + +selectAsWKT +// tag::aswkt +SELECT city, ST_AsWKT(location) location FROM "geo" WHERE city = 'Amsterdam'; + + city:s | location:s +Amsterdam |point
(4.850311987102032 52.347556999884546) +// end::aswkt +; + +selectWKTToSQL +// tag::wkttosql +SELECT CAST(ST_WKTToSQL('POINT (10 20)') AS STRING) location; + + location:s +point (10.0 20.0) +// end::wkttosql +; + + +selectDistance +// tag::distance +SELECT ST_Distance(ST_WKTToSQL('POINT (10 20)'), ST_WKTToSQL('POINT (20 30)')) distance; + + distance:d +1499101.2889383635 +// end::distance +; + +/////////////////////////////// +// +// Geometry Properties +// +/////////////////////////////// + +selectGeometryType +// tag::geometrytype +SELECT ST_GeometryType(ST_WKTToSQL('POINT (10 20)')) type; + + type:s +POINT +// end::geometrytype +; + +selectX +// tag::x +SELECT ST_X(ST_WKTToSQL('POINT (10 20)')) x; + + x:d +10.0 +// end::x +; + +selectY +// tag::y +SELECT ST_Y(ST_WKTToSQL('POINT (10 20)')) y; + + y:d +20.0 +// end::y +; + +selectZ +// tag::z +SELECT ST_Z(ST_WKTToSQL('POINT (10 20 30)')) z; + + z:d +30.0 +// end::z +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv new file mode 100644 index 0000000000000..8275bd7c884ef --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv @@ -0,0 +1,16 @@ +city,region,region_point,location,shape +Mountain View,Americas,POINT(-105.2551 54.5260),point (-122.083843 37.386483),point (-122.083843 37.386483) +Chicago,Americas,POINT(-105.2551 54.5260),point (-87.637874 41.888783),point (-87.637874 41.888783) +New York,Americas,POINT(-105.2551 54.5260),point (-73.990027 40.745171),point (-73.990027 40.745171) +San Francisco,Americas,POINT(-105.2551 54.5260),point (-122.394228 37.789541),point (-122.394228 37.789541) +Phoenix,Americas,POINT(-105.2551 54.5260),point (-111.973505 33.376242),point (-111.973505 33.376242) +Amsterdam,Europe,POINT(15.2551 54.5260),point (4.850312 52.347557),point (4.850312 52.347557) +Berlin,Europe,POINT(15.2551 54.5260),point (13.390889 52.486701),point (13.390889 52.486701) +Munich,Europe,POINT(15.2551 54.5260),point (11.537505 48.146321),point (11.537505 48.146321) +London,Europe,POINT(15.2551 54.5260),point (-0.121672 51.510871),point (-0.121672 51.510871) +Paris,Europe,POINT(15.2551 54.5260),point (2.351773 48.845538),point (2.351773 48.845538) +Singapore,Asia,POINT(100.6197 34.0479),point (103.855535 1.295868),point (103.855535 1.295868) +Hong Kong,Asia,POINT(100.6197 34.0479),point (114.183925 22.281397),point (114.183925 22.281397) +Seoul,Asia,POINT(100.6197 34.0479),point (127.060851 37.509132),point (127.060851 37.509132) +Tokyo,Asia,POINT(100.6197 34.0479),point (139.76402225 35.669616),point (139.76402225 35.669616) +Sydney,Asia,POINT(100.6197 34.0479),point (151.208629 -33.863385),point (151.208629 -33.863385) diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json new file mode 100644 index 0000000000000..8c65742aac063 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql-bulk.json @@ -0,0 +1,33 @@ +{"index":{"_id": "1"}} +{"region": "Americas", "city": "Mountain View", "location": {"lat":"37.386483", "lon":"-122.083843"}, "location_no_dv": {"lat":"37.386483", "lon":"-122.083843"}, "shape": "POINT (-122.083843 37.386483 30)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "2"}} +{"region": "Americas", "city": "Chicago", "location": [-87.637874, 41.888783], "location_no_dv": [-87.637874, 41.888783], "shape": {"type" : "point", "coordinates" : [-87.637874, 41.888783, 181]}, "region_point": "POINT(-105.2551 54.5260)"} 
+{"index":{"_id": "3"}} +{"region": "Americas", "city": "New York", "location": "40.745171,-73.990027", "location_no_dv": "40.745171,-73.990027", "shape": "POINT (-73.990027 40.745171 10)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "4"}} +{"region": "Americas", "city": "San Francisco", "location": "37.789541,-122.394228", "location_no_dv": "37.789541,-122.394228", "shape": "POINT (-122.394228 37.789541 16)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "5"}} +{"region": "Americas", "city": "Phoenix", "location": "33.376242,-111.973505", "location_no_dv": "33.376242,-111.973505", "shape": "POINT (-111.973505 33.376242 331)", "region_point": "POINT(-105.2551 54.5260)"} +{"index":{"_id": "6"}} +{"region": "Europe", "city": "Amsterdam", "location": "52.347557,4.850312", "location_no_dv": "52.347557,4.850312", "shape": "POINT (4.850312 52.347557 2)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "7"}} +{"region": "Europe", "city": "Berlin", "location": "52.486701,13.390889", "location_no_dv": "52.486701,13.390889", "shape": "POINT (13.390889 52.486701 34)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "8"}} +{"region": "Europe", "city": "Munich", "location": "48.146321,11.537505", "location_no_dv": "48.146321,11.537505", "shape": "POINT (11.537505 48.146321 519)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "9"}} +{"region": "Europe", "city": "London", "location": "51.510871,-0.121672", "location_no_dv": "51.510871,-0.121672", "shape": "POINT (-0.121672 51.510871 11)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "10"}} +{"region": "Europe", "city": "Paris", "location": "48.845538,2.351773", "location_no_dv": "48.845538,2.351773", "shape": "POINT (2.351773 48.845538 35)", "region_point": "POINT(15.2551 54.5260)"} +{"index":{"_id": "11"}} +{"region": "Asia", "city": "Singapore", "location": "1.295868,103.855535", "location_no_dv": "1.295868,103.855535", "shape": "POINT (103.855535 1.295868 15)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "12"}} +{"region": "Asia", "city": "Hong Kong", "location": "22.281397,114.183925", "location_no_dv": "22.281397,114.183925", "shape": "POINT (114.183925 22.281397 552)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "13"}} +{"region": "Asia", "city": "Seoul", "location": "37.509132,127.060851", "location_no_dv": "37.509132,127.060851", "shape": "POINT (127.060851 37.509132 38)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "14"}} +{"region": "Asia", "city": "Tokyo", "location": "35.669616,139.76402225", "location_no_dv": "35.669616,139.76402225", "shape": "POINT (139.76402225 35.669616 40)", "region_point": "POINT(100.6197 34.0479)"} +{"index":{"_id": "15"}} +{"region": "Asia", "city": "Sydney", "location": "-33.863385,151.208629", "location_no_dv": "-33.863385,151.208629", "shape": "POINT (151.208629 -33.863385 100)", "region_point": "POINT(100.6197 34.0479)"} + + + diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec new file mode 100644 index 0000000000000..31f3857216c0b --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.csv-spec @@ -0,0 +1,288 @@ +// +// Commands on geo test data +// + +showTables +SHOW TABLES "geo"; + + name:s | type:s | kind:s +geo |BASE TABLE |INDEX +; + +// DESCRIBE + +describe +DESCRIBE "geo"; + + column:s | type:s | mapping:s +city | VARCHAR | keyword +location | GEOMETRY | geo_point 
+location_no_dv | GEOMETRY | geo_point +region | VARCHAR | keyword +region_point | VARCHAR | keyword +shape | GEOMETRY | geo_shape +; + +// SELECT ALL +// TODO: For now we just get geopoint formatted as is and we also need to convert it to STRING to work with CSV + +selectAllPointsAsStrings +SELECT city, CAST(location AS STRING) location, CAST(location_no_dv AS STRING) location_no_dv, CAST(shape AS STRING) shape, region FROM "geo" ORDER BY "city"; + + city:s | location:s | location_no_dv:s | shape:s | region:s +Amsterdam |point (4.850311987102032 52.347556999884546) |point (4.850312 52.347557) |point (4.850312 52.347557 2.0) |Europe +Berlin |point (13.390888944268227 52.48670099303126) |point (13.390889 52.486701) |point (13.390889 52.486701 34.0) |Europe +Chicago |point (-87.63787407428026 41.888782968744636) |point (-87.637874 41.888783) |point (-87.637874 41.888783 181.0) |Americas +Hong Kong |point (114.18392493389547 22.28139698971063) |point (114.183925 22.281397) |point (114.183925 22.281397 552.0) |Asia +London |point (-0.12167204171419144 51.51087098289281)|point (-0.121672 51.510871) |point (-0.121672 51.510871 11.0) |Europe +Mountain View |point (-122.08384302444756 37.38648299127817) |point (-122.083843 37.386483) |point (-122.083843 37.386483 30.0) |Americas +Munich |point (11.537504978477955 48.14632098656148) |point (11.537505 48.146321) |point (11.537505 48.146321 519.0) |Europe +New York |point (-73.9900270756334 40.74517097789794) |point (-73.990027 40.745171) |point (-73.990027 40.745171 10.0) |Americas +Paris |point (2.3517729341983795 48.84553796611726) |point (2.351773 48.845538) |point (2.351773 48.845538 35.0) |Europe +Phoenix |point (-111.97350500151515 33.37624196894467) |point (-111.973505 33.376242) |point (-111.973505 33.376242 331.0) |Americas +San Francisco |point (-122.39422800019383 37.789540970698) |point (-122.394228 37.789541) |point (-122.394228 37.789541 16.0) |Americas +Seoul |point (127.06085099838674 37.50913198571652) |point (127.060851 37.509132) |point (127.060851 37.509132 38.0) |Asia +Singapore |point (103.8555349688977 1.2958679627627134) |point (103.855535 1.295868) |point (103.855535 1.295868 15.0) |Asia +Sydney |point (151.20862897485495 -33.863385021686554)|point (151.208629 -33.863385) |point (151.208629 -33.863385 100.0) |Asia +Tokyo |point (139.76402222178876 35.66961596254259) |point (139.76402225 35.669616)|point (139.76402225 35.669616 40.0) |Asia +; + +// TODO: Both shape and location contain the same data for now, we should change it later to make things more interesting +selectAllPointsAsWKT +SELECT city, ST_ASWKT(location) location_wkt, ST_ASWKT(shape) shape_wkt, region FROM "geo" ORDER BY "city"; + + city:s | location_wkt:s | shape_wkt:s | region:s +Amsterdam |point (4.850311987102032 52.347556999884546) |point (4.850312 52.347557 2.0) |Europe +Berlin |point (13.390888944268227 52.48670099303126) |point (13.390889 52.486701 34.0) |Europe +Chicago |point (-87.63787407428026 41.888782968744636) |point (-87.637874 41.888783 181.0) |Americas +Hong Kong |point (114.18392493389547 22.28139698971063) |point (114.183925 22.281397 552.0) |Asia +London |point (-0.12167204171419144 51.51087098289281)|point (-0.121672 51.510871 11.0) |Europe +Mountain View |point (-122.08384302444756 37.38648299127817) |point (-122.083843 37.386483 30.0) |Americas +Munich |point (11.537504978477955 48.14632098656148) |point (11.537505 48.146321 519.0) |Europe +New York |point (-73.9900270756334 40.74517097789794) |point (-73.990027 40.745171 10.0) |Americas 
+Paris |point (2.3517729341983795 48.84553796611726) |point (2.351773 48.845538 35.0) |Europe +Phoenix |point (-111.97350500151515 33.37624196894467) |point (-111.973505 33.376242 331.0) |Americas +San Francisco |point (-122.39422800019383 37.789540970698) |point (-122.394228 37.789541 16.0) |Americas +Seoul |point (127.06085099838674 37.50913198571652) |point (127.060851 37.509132 38.0) |Asia +Singapore |point (103.8555349688977 1.2958679627627134) |point (103.855535 1.295868 15.0) |Asia +Sydney |point (151.20862897485495 -33.863385021686554)|point (151.208629 -33.863385 100.0) |Asia +Tokyo |point (139.76402222178876 35.66961596254259) |point (139.76402225 35.669616 40.0) |Asia +; + +selectWithAsWKTInWhere +SELECT city, ST_ASWKT(location) location_wkt, region FROM "geo" WHERE LOCATE('114', ST_ASWKT(location)) > 0 ORDER BY "city"; + + city:s | location_wkt:s | region:s +Hong Kong |point (114.18392493389547 22.28139698971063)|Asia +; + +selectAllPointsOrderByLonFromAsWKT +SELECT city, SUBSTRING(ST_ASWKT(location), 8, LOCATE(' ', ST_ASWKT(location), 8) - 8) lon FROM "geo" ORDER BY lon; + + city:s | lon:s +London |-0.12167204171419144 +Phoenix |-111.97350500151515 +Mountain View |-122.08384302444756 +San Francisco |-122.39422800019383 +New York |-73.9900270756334 +Chicago |-87.63787407428026 +Singapore |103.8555349688977 +Munich |11.537504978477955 +Hong Kong |114.18392493389547 +Seoul |127.06085099838674 +Berlin |13.390888944268227 +Tokyo |139.76402222178876 +Sydney |151.20862897485495 +Paris |2.3517729341983795 +Amsterdam |4.850311987102032 +; + +selectAllPointsGroupByHemisphereFromAsWKT +SELECT COUNT(city) count, CAST(SUBSTRING(ST_ASWKT(location), 8, 1) = '-' AS STRING) west FROM "geo" GROUP BY west ORDER BY west; + + count:l | west:s +9 |false +6 |true +; + +selectRegionUsingWktToSql +SELECT region, city, ST_ASWKT(ST_WKTTOSQL(region_point)) region_wkt FROM geo ORDER BY region, city; + + region:s | city:s | region_wkt:s +Americas |Chicago |point (-105.2551 54.526) +Americas |Mountain View |point (-105.2551 54.526) +Americas |New York |point (-105.2551 54.526) +Americas |Phoenix |point (-105.2551 54.526) +Americas |San Francisco |point (-105.2551 54.526) +Asia |Hong Kong |point (100.6197 34.0479) +Asia |Seoul |point (100.6197 34.0479) +Asia |Singapore |point (100.6197 34.0479) +Asia |Sydney |point (100.6197 34.0479) +Asia |Tokyo |point (100.6197 34.0479) +Europe |Amsterdam |point (15.2551 54.526) +Europe |Berlin |point (15.2551 54.526) +Europe |London |point (15.2551 54.526) +Europe |Munich |point (15.2551 54.526) +Europe |Paris |point (15.2551 54.526) +; + +selectCitiesWithAGroupByWktToSql +SELECT COUNT(city) city_by_region, CAST(ST_WKTTOSQL(region_point) AS STRING) region FROM geo WHERE city LIKE '%a%' GROUP BY ST_WKTTOSQL(region_point) ORDER BY ST_WKTTOSQL(region_point); + + city_by_region:l | region:s +3 |point (-105.2551 54.526) +1 |point (100.6197 34.0479) +2 |point (15.2551 54.526) +; + +selectCitiesWithEOrderByWktToSql +SELECT region, city FROM geo WHERE city LIKE '%e%' ORDER BY ST_WKTTOSQL(region_point), city; + + region:s | city:s +Americas |Mountain View +Americas |New York +Americas |Phoenix +Asia |Seoul +Asia |Singapore +Asia |Sydney +Europe |Amsterdam +Europe |Berlin +; + + +selectCitiesByDistance +SELECT region, city, ST_Distance(location, ST_WktToSQL('POINT (-71 42)')) distance FROM geo WHERE distance < 5000000 ORDER BY region, city; + + region:s | city:s | distance:d +Americas |Chicago |1373941.5140200066 +Americas |Mountain View |4335936.909375596 +Americas |New York 
|285839.6579622518 +Americas |Phoenix |3692895.0346903414 +Americas |San Francisco |4343565.010996301 +; + +selectCitiesByDistanceFloored +SELECT region, city, FLOOR(ST_Distance(location, ST_WktToSQL('POINT (-71 42)'))) distance FROM geo WHERE distance < 5000000 ORDER BY region, city; + + region:s | city:s | distance:l +Americas |Chicago |1373941 +Americas |Mountain View |4335936 +Americas |New York |285839 +Americas |Phoenix |3692895 +Americas |San Francisco |4343565 +; + +selectCitiesOrderByDistance +SELECT region, city FROM geo ORDER BY ST_Distance(location, ST_WktToSQL('POINT (-71 42)')) ; + + region:s | city:s +Americas |New York +Americas |Chicago +Americas |Phoenix +Americas |Mountain View +Americas |San Francisco +Europe |London +Europe |Paris +Europe |Amsterdam +Europe |Berlin +Europe |Munich +Asia |Tokyo +Asia |Seoul +Asia |Hong Kong +Asia |Singapore +Asia |Sydney +; + +groupCitiesByDistance +SELECT COUNT(*) count, FIRST(region) region FROM geo GROUP BY FLOOR(ST_Distance(location, ST_WktToSQL('POINT (-71 42)'))/5000000); + + count:l | region:s +5 |Americas +5 |Europe +3 |Asia +2 |Asia +; + +selectWktToSqlOfNull +SELECT ST_ASWKT(ST_WktToSql(NULL)) shape; + shape:s +null +; + +selectWktToSqlOfNull +SELECT ST_Distance(ST_WktToSql(NULL), ST_WktToSQL('POINT (-71 42)')) shape; + shape:d +null +; + +groupByGeometryType +SELECT COUNT(*) cnt, ST_GeometryType(location) gt FROM geo GROUP BY ST_GeometryType(location); + + cnt:l | gt:s +15 |POINT +; + + +groupAndOrderByGeometryType +SELECT COUNT(*) cnt, ST_GeometryType(location) gt FROM geo GROUP BY gt ORDER BY gt; + + cnt:l | gt:s +15 |POINT +; + +groupByEastWest +SELECT COUNT(*) cnt, FLOOR(ST_X(location)/90) east FROM geo GROUP BY east ORDER BY east; + + cnt:l | east:l +3 |-2 +3 |-1 +4 |0 +5 |1 +; + +groupByNorthSouth +SELECT COUNT(*) cnt, FLOOR(ST_Y(location)/45) north FROM geo GROUP BY north ORDER BY north; + + cnt:l | north:l +1 |-1 +9 |0 +5 |1 +; + +groupByNorthEastSortByEastNorth +SELECT COUNT(*) cnt, FLOOR(ST_Y(location)/45) north, FLOOR(ST_X(location)/90) east FROM geo GROUP BY north, east ORDER BY east, north; + + cnt:l | north:l | east:l +3 |0 |-2 +2 |0 |-1 +1 |1 |-1 +4 |1 |0 +1 |-1 |1 +4 |0 |1 +; + +selectFilterByXOfLocation +SELECT city, ST_X(shape) x, ST_Y(shape) y, ST_Z(shape) z, ST_X(location) lx, ST_Y(location) ly FROM geo WHERE lx > 0 ORDER BY ly; + + city:s | x:d | y:d | z:d | lx:d | ly:d +Sydney |151.208629 |-33.863385 |100.0 |151.20862897485495|-33.863385021686554 +Singapore |103.855535 |1.295868 |15.0 |103.8555349688977 |1.2958679627627134 +Hong Kong |114.183925 |22.281397 |552.0 |114.18392493389547|22.28139698971063 +Tokyo |139.76402225 |35.669616 |40.0 |139.76402222178876|35.66961596254259 +Seoul |127.060851 |37.509132 |38.0 |127.06085099838674|37.50913198571652 +Munich |11.537505 |48.146321 |519.0 |11.537504978477955|48.14632098656148 +Paris |2.351773 |48.845538 |35.0 |2.3517729341983795|48.84553796611726 +Amsterdam |4.850312 |52.347557 |2.0 |4.850311987102032 |52.347556999884546 +Berlin |13.390889 |52.486701 |34.0 |13.390888944268227|52.48670099303126 +; + +selectFilterByRegionPoint +SELECT city, region, ST_X(location) x FROM geo WHERE ST_X(ST_WKTTOSQL(region_point)) < 0 ORDER BY x; + + city:s | region:s | x:d +San Francisco |Americas |-122.39422800019383 +Mountain View |Americas |-122.08384302444756 +Phoenix |Americas |-111.97350500151515 +Chicago |Americas |-87.63787407428026 +New York |Americas |-73.9900270756334 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json 
b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json new file mode 100644 index 0000000000000..56007a0284c43 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.json @@ -0,0 +1,28 @@ +{ + "settings": { + "number_of_shards": 1 + }, + "mappings": { + "properties": { + "region": { + "type": "keyword" + }, + "city": { + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "location_no_dv": { + "type": "geo_point", + "doc_values": "false" + }, + "shape": { + "type": "geo_shape" + }, + "region_point": { + "type": "keyword" + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec new file mode 100644 index 0000000000000..e801d8477f6bf --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/geosql.sql-spec @@ -0,0 +1,24 @@ +// +// Commands on geo test data +// + +selectAllShapesAsGeometries +SELECT city, shape, region FROM "geo" ORDER BY "city"; + +selectAllShapesAsWKT +SELECT city, ST_GEOMFROMTEXT(ST_ASWKT(shape)) shape_wkt, region FROM "geo" ORDER BY "city"; + +selectAllPointsAsGeometries +SELECT city, location, region FROM "geo" ORDER BY "city"; + +selectAllPointsAsWKT +SELECT city, ST_GEOMFROMTEXT(ST_ASWKT(location)) shape_wkt, region FROM "geo" ORDER BY "city"; + +selectRegionUsingWktToSqlWithoutConvertion +SELECT region, city, shape, ST_GEOMFROMTEXT(region_point) region_wkt FROM geo ORDER BY region, city; + +selectCitiesWithGroupByWktToSql +SELECT COUNT(city) city_by_region, ST_GEOMFROMTEXT(region_point) region_geom FROM geo WHERE city LIKE '%a%' GROUP BY region_geom ORDER BY city_by_region; + +selectCitiesWithOrderByWktToSql +SELECT region, city, UCASE(ST_ASWKT(ST_GEOMFROMTEXT(region_point))) region_wkt FROM geo WHERE city LIKE '%e%' ORDER BY region_wkt, city; diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql b/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql new file mode 100644 index 0000000000000..b8b8d4e36f453 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/geo/setup_test_geo.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS "geo"; +CREATE TABLE "geo" ( + "city" VARCHAR(50), + "region" VARCHAR(50), + "region_point" VARCHAR(50), + "location" POINT, + "shape" GEOMETRY +) + AS SELECT * FROM CSVREAD('classpath:/geo/geo.csv'); diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt b/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt new file mode 100644 index 0000000000000..ac061f5cc4493 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/OGC-NOTICE.txt @@ -0,0 +1,41 @@ +Software Notice + +This OGC work (including software, documents, or other related items) is being +provided by the copyright holders under the following license. By obtaining, +using and/or copying this work, you (the licensee) agree that you have read, +understood, and will comply with the following terms and conditions: + +Permission to use, copy, and modify this software and its documentation, with +or without modification, for any purpose and without fee or royalty is hereby +granted, provided that you include the following on ALL copies of the software +and documentation or portions thereof, including modifications, that you make: + +1. The full text of this NOTICE in a location viewable to users of the +redistributed or derivative work. + +2. Any pre-existing intellectual property disclaimers, notices, or terms and +conditions. 
If none exist, a short notice of the following form (hypertext is +preferred, text is permitted) should be used within the body of any +redistributed or derivative code: "Copyright © [$date-of-document] Open +Geospatial Consortium, Inc. All Rights Reserved. +http://www.opengeospatial.org/ogc/legal (Hypertext is preferred, but a textual +representation is permitted.) + +3. Notice of any changes or modifications to the OGC files, including the date +changes were made. (We recommend you provide URIs to the location from which +the code is derived.) + + +THIS SOFTWARE AND DOCUMENTATION IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE +NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT +THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY +ATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. + +COPYRIGHT HOLDERS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF ANY USE OF THE SOFTWARE OR DOCUMENTATION. + +The name and trademarks of copyright holders may NOT be used in advertising or +publicity pertaining to the software without specific, written prior permission. +Title to copyright in this software and any associated documentation will at all +times remain with copyright holders. \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec new file mode 100644 index 0000000000000..f1941161697d2 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.csv-spec @@ -0,0 +1,36 @@ +// +// Commands on OGC data +// + +showTables +SHOW TABLES "ogc"; + + name:s | type:s | kind:s +ogc |BASE TABLE |INDEX +; + +// DESCRIBE + +describe +DESCRIBE "ogc"; + + column:s | type:s | mapping:s +address | VARCHAR | text +address.keyword | VARCHAR | keyword +aliases | VARCHAR | text +aliases.keyword | VARCHAR | keyword +boundary | GEOMETRY | geo_shape +centerline | GEOMETRY | geo_shape +centerlines | GEOMETRY | geo_shape +fid | INTEGER | integer +footprint | GEOMETRY | geo_shape +name | VARCHAR | text +name.keyword | VARCHAR | keyword +neatline | GEOMETRY | geo_shape +num_lanes | INTEGER | integer +ogc_type | VARCHAR | keyword +position | GEOMETRY | geo_shape +shore | GEOMETRY | geo_shape +shores | GEOMETRY | geo_shape +type | VARCHAR | keyword +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json new file mode 100644 index 0000000000000..afdf2f5d61ac6 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.json @@ -0,0 +1,58 @@ +// This dataset is derived from OpenGIS Simple Features for SQL (Types and Functions) Test Suite on Apr 1, 2018 +// +// Copyright © 2018 Open Geospatial Consortium, Inc. All Rights Reserved. 
+// http://www.opengeospatial.org/ogc/legal
+//
+// lakes
+{"index":{"_id": "101"}}
+{"ogc_type":"lakes", "fid": 101, "name": "BLUE LAKE", "shore": "POLYGON ((52 18, 66 23, 73 9, 48 6, 52 18), (59 18, 67 18, 67 13, 59 13, 59 18))"}
+//
+// road segments
+{"index":{"_id": "102"}}
+{"ogc_type":"road_segments", "fid": 102, "name": "Route 5", "num_lanes": 2, "centerline": "LINESTRING (0 18, 10 21, 16 23, 28 26, 44 31)"}
+{"index":{"_id": "103"}}
+{"ogc_type":"road_segments", "fid": 103, "name": "Route 5", "aliases": "Main Street", "num_lanes": 4, "centerline": "LINESTRING (44 31, 56 34, 70 38)"}
+{"index":{"_id": "104"}}
+{"ogc_type":"road_segments", "fid": 104, "name": "Route 5", "num_lanes": 2, "centerline": "LINESTRING (70 38, 72 48)"}
+{"index":{"_id": "105"}}
+{"ogc_type":"road_segments", "fid": 105, "name": "Main Street", "num_lanes": 4, "centerline": "LINESTRING (70 38, 84 42)"}
+{"index":{"_id": "106"}}
+{"ogc_type":"road_segments", "fid": 106, "name": "Dirt Road by Green Forest", "num_lanes": 1, "centerline": "LINESTRING (28 26, 28 0)"}
+//
+// divided routes
+{"index":{"_id": "119"}}
+{"ogc_type":"divided_routes", "fid": 119, "name": "Route 75", "num_lanes": 4, "centerlines": "MULTILINESTRING ((10 48, 10 21, 10 0), (16 0, 16 23, 16 48))"}
+//
+// forests
+{"index":{"_id": "109"}}
+{"ogc_type":"forests", "fid": 109, "name": "Green Forest", "boundary": "MULTIPOLYGON (((28 26, 28 0, 84 0, 84 42, 28 26), (52 18, 66 23, 73 9, 48 6, 52 18)), ((59 18, 67 18, 67 13, 59 13, 59 18)))"}
+//
+// bridges
+{"index":{"_id": "110"}}
+{"ogc_type":"bridges", "fid": 110, "name": "Cam Bridge", "position": "POINT (44 31)"}
+//
+// streams
+{"index":{"_id": "111"}}
+{"ogc_type":"streams", "fid": 111, "name": "Cam Stream", "centerline": "LINESTRING (38 48, 44 41, 41 36, 44 31, 52 18)"}
+{"index":{"_id": "112"}}
+{"ogc_type":"streams", "fid": 112, "centerline": "LINESTRING (76 0, 78 4, 73 9)"}
+//
+// buildings
+{"index":{"_id": "113"}}
+{"ogc_type":"buildings", "fid": 113, "address": "123 Main Street", "position": "POINT (52 30)", "footprint": "POLYGON ((50 31, 54 31, 54 29, 50 29, 50 31))"}
+{"index":{"_id": "114"}}
+{"ogc_type":"buildings", "fid": 114, "address": "215 Main Street", "position": "POINT (64 33)", "footprint": "POLYGON ((66 34, 62 34, 62 32, 66 32, 66 34))"}
+//
+// ponds
+{"index":{"_id": "120"}}
+{"ogc_type":"ponds", "fid": 120, "type": "Stock Pond", "shores": "MULTIPOLYGON (((24 44, 22 42, 24 40, 24 44)), ((26 44, 26 40, 28 42, 26 44)))"}
+//
+// named places
+{"index":{"_id": "117"}}
+{"ogc_type":"named_places", "fid": 117, "name": "Ashton", "boundary": "POLYGON ((62 48, 84 48, 84 30, 56 30, 56 34, 62 48))"}
+{"index":{"_id": "118"}}
+{"ogc_type":"named_places", "fid": 118, "name": "Goose Island", "boundary": "POLYGON ((67 13, 67 18, 59 18, 59 13, 67 13))"}
+//
+// map neat lines
+{"index":{"_id": "115"}}
+{"ogc_type":"map_neatlines", "fid": 115, "neatline": "POLYGON ((0 0, 0 48, 84 48, 84 0, 0 0))"}
diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec
new file mode 100644
index 0000000000000..3976c5a8b181e
--- /dev/null
+++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/ogc.sql-spec
@@ -0,0 +1,85 @@
+//
+// Basic GEO SELECT
+//
+
+selectLakes
+SELECT fid, name, shore FROM lakes ORDER BY fid;
+selectRoadSegments
+SELECT fid, name, num_lanes, aliases, centerline FROM road_segments ORDER BY fid;
+selectDividedRoutes
+SELECT fid, name, num_lanes, centerlines FROM divided_routes ORDER BY fid;
+selectForests
+SELECT
fid, name, boundary FROM forests ORDER BY fid; +selectBridges +SELECT fid, name, position FROM bridges ORDER BY fid; +selectStreams +SELECT fid, name, centerline FROM streams ORDER BY fid; +selectBuildings +SELECT fid, address, position, footprint FROM buildings ORDER BY fid; +selectPonds +SELECT fid, type, name, shores FROM ponds ORDER BY fid; +selectNamedPlaces +SELECT fid, name, boundary FROM named_places ORDER BY fid; +selectMapNeatLines +SELECT fid, neatline FROM map_neatlines ORDER BY fid; + +// +// Type conversion functions +// + +// The string serialization is slightly different between ES and H2, so we need to tweak it a bit by uppercasing both +// and removing floating point +selectRoadSegmentsAsWkt +SELECT fid, name, num_lanes, aliases, REPLACE(UCASE(ST_AsText(centerline)), '.0', '') centerline_wkt FROM road_segments ORDER BY fid; + +selectSinglePoint +SELECT ST_GeomFromText('point (10.0 12.0)') point; + + +// +// Geometry Property Functions +// +// H2GIS doesn't follow the standard here that mandates ST_Dimension returns SMALLINT +selectLakesProps +SELECT fid, UCASE(ST_GeometryType(shore)) type FROM lakes ORDER BY fid; +selectRoadSegmentsProps +SELECT fid, UCASE(ST_GeometryType(centerline)) type FROM road_segments ORDER BY fid; +selectDividedRoutesProps +SELECT fid, UCASE(ST_GeometryType(centerlines)) type FROM divided_routes ORDER BY fid; +selectForestsProps +SELECT fid, UCASE(ST_GeometryType(boundary)) type FROM forests ORDER BY fid; +selectBridgesProps +SELECT fid, UCASE(ST_GeometryType(position)) type FROM bridges ORDER BY fid; +selectStreamsProps +SELECT fid, UCASE(ST_GeometryType(centerline)) type FROM streams ORDER BY fid; +selectBuildingsProps +SELECT fid, UCASE(ST_GeometryType(position)) type1, UCASE(ST_GeometryType(footprint)) type2 FROM buildings ORDER BY fid; +selectPondsProps +SELECT fid, UCASE(ST_GeometryType(shores)) type FROM ponds ORDER BY fid; +selectNamedPlacesProps +SELECT fid, UCASE(ST_GeometryType(boundary)) type FROM named_places ORDER BY fid; +selectMapNeatLinesProps +SELECT fid, UCASE(ST_GeometryType(neatline)) type FROM map_neatlines ORDER BY fid; + +selectLakesXY +SELECT fid, ST_X(shore) x, ST_Y(shore) y FROM lakes ORDER BY fid; +selectRoadSegmentsXY +SELECT fid, ST_X(centerline) x, ST_Y(centerline) y FROM road_segments ORDER BY fid; +selectDividedRoutesXY +SELECT fid, ST_X(centerlines) x, ST_Y(centerlines) y FROM divided_routes ORDER BY fid; +selectForestsXY +SELECT fid, ST_X(boundary) x, ST_Y(boundary) y FROM forests ORDER BY fid; +selectBridgesPositionsXY +SELECT fid, ST_X(position) x, ST_Y(position) y FROM bridges ORDER BY fid; +selectStreamsXY +SELECT fid, ST_X(centerline) x, ST_Y(centerline) y FROM streams ORDER BY fid; +selectBuildingsXY +SELECT fid, ST_X(position) x, ST_Y(position) y FROM buildings ORDER BY fid; +selectBuildingsFootprintsXY +SELECT fid, ST_X(footprint) x, ST_Y(footprint) y FROM buildings ORDER BY fid; +selectPondsXY +SELECT fid, ST_X(shores) x, ST_Y(shores) y FROM ponds ORDER BY fid; +selectNamedPlacesXY +SELECT fid, ST_X(boundary) x, ST_Y(boundary) y FROM named_places ORDER BY fid; +selectMapNeatLinesXY +SELECT fid, ST_X(neatline) x, ST_Y(neatline) y FROM map_neatlines ORDER BY fid; diff --git a/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql b/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql new file mode 100644 index 0000000000000..6d1322ecd3690 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/ogc/sqltsch.sql @@ -0,0 +1,672 @@ +-- FILE: sqltsch.sql 10/01/98 +-- +-- 1 2 3 4 5 6 7 8 
+--345678901234567890123456789012345678901234567890123456789012345678901234567890
+--//////////////////////////////////////////////////////////////////////////////
+--
+-- Copyright 1998, Open GIS Consortium, Inc.
+--
+-- The material in this document details an Open GIS Consortium Test Suite in
+-- accordance with a license that your organization has signed. Please refer
+-- to http://www.opengeospatial.org/testing/ to obtain a copy of the general license
+-- (it is part of the Conformance Testing Agreement).
+--
+--//////////////////////////////////////////////////////////////////////////////
+--
+-- OpenGIS Simple Features for SQL (Types and Functions) Test Suite Software
+--
+-- This file "sqltsch.sql" is part 1 of a two part standardized test
+-- suite in SQL script form; the other file required for this test suite is
+-- "sqltque.sql". One additional script is provided ("sqltcle.sql") that
+-- performs cleanup operations between test runs. Other documents that
+-- describe the OGC Conformance Test Program are available via the WWW at
+-- http://www.opengeospatial.org/testing/index.htm
+--
+-- NOTE CONCERNING INFORMATION ON CONFORMANCE TESTING AND THIS TEST SUITE
+-- ----------------------------------------------------------------------
+--
+-- Organizations wishing to submit product for conformance testing should
+-- access the above WWW site to discover the proper procedure for obtaining
+-- a license to use the OpenGIS(R) certification mark associated with this
+-- test suite.
+--
+--
+-- NOTE CONCERNING TEST SUITE ADAPTATION
+-- -------------------------------------
+--
+-- OGC recognizes that many products will have to adapt this test suite to
+-- make it work properly. OGC has documented the allowable adaptations within
+-- this test suite where possible. Other information about adaptations may be
+-- discovered in the Test Suite Guidelines document for this test suite.
+--
+-- PLEASE NOTE THE OGC REQUIRES THAT ADAPTATIONS ARE FULLY DOCUMENTED USING
+-- LIBERAL COMMENT BLOCKS CONFORMING TO THE FOLLOWING FORMAT:
+--
+-- -- !#@ ADAPTATION BEGIN
+-- explanatory text goes here
+-- ---------------------
+-- -- BEGIN ORIGINAL SQL
+-- ---------------------
+-- original sql goes here
+-- ---------------------
+-- -- END ORIGINAL SQL
+-- ---------------------
+-- -- BEGIN ADAPTED SQL
+-- ---------------------
+-- adapted sql goes here
+-- ---------------------
+-- -- END ADAPTED SQL
+-- ---------------------
+-- -- !#@ ADAPTATION END
+--
+--//////////////////////////////////////////////////////////////////////////////
+--
+-- BEGIN TEST SUITE CODE
+--
+--//////////////////////////////////////////////////////////////////////////////
+--
+-- Create the necessary feature and geometry tables (views) and metadata tables
+-- (views) to load and query the "Blue Lake" test data for OpenGIS Simple
+-- Features for SQL (Types and Functions) test.
+--
+-- Required feature tables (views) are:
+--    Lakes
+--    Road Segments
+--    Divided Routes
+--    Buildings
+--    Forests
+--    Bridges
+--    Named Places
+--    Streams
+--    Ponds
+--    Map Neatlines
+--
+-- Please refer to the Test Suite Guidelines for this test suite for further
+-- information concerning this test data.
+--
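+-- (Editorial note, not part of the original OGC script.) As a concrete,
+-- purely illustrative instance of the adaptation-block convention described
+-- above: if an implementation renames a hypothetical constructor call, the
+-- change would be recorded like this (both statements shown as comments):
+--
+-- -- !#@ ADAPTATION BEGIN
+-- Adds the ST_ prefix required by this implementation
+-- ---------------------
+-- -- BEGIN ORIGINAL SQL
+-- ---------------------
+-- SELECT PointFromText('POINT( 1 2 )', 101);
+-- ---------------------
+-- -- END ORIGINAL SQL
+-- ---------------------
+-- -- BEGIN ADAPTED SQL
+-- ---------------------
+-- SELECT ST_PointFromText('POINT( 1 2 )', 101);
+-- ---------------------
+-- -- END ADAPTED SQL
+-- ---------------------
+-- -- !#@ ADAPTATION END
+--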
+-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- CREATE SPATIAL_REF_SYS METADATA TABLE +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- +-- *** ADAPTATION ALERT **** +-- Implementations do not need to execute this statement if they already +-- create the spatial_ref_sys table or view via another mechanism. +-- The size of the srtext VARCHAR exceeds that allowed on some systems. +-- +-- CREATE TABLE spatial_ref_sys ( +-- srid INTEGER NOT NULL PRIMARY KEY, +-- auth_name VARCHAR(256), +-- auth_srid INTEGER, +-- -- srtext VARCHAR(2048) +-- srtext VARCHAR(2000) +-- ); +-- -- +-- INSERT INTO spatial_ref_sys VALUES(101, 'POSC', 32214, +-- 'PROJCS["UTM_ZONE_14N", GEOGCS["World Geodetic System 72", +-- DATUM["WGS_72", SPHEROID["NWL_10D", 6378135, 298.26]], +-- PRIMEM["Greenwich", 0], UNIT["Meter", 1.0]], +-- PROJECTION["Transverse_Mercator"], +-- PARAMETER["False_Easting", 500000.0], +-- PARAMETER["False_Northing", 0.0], +-- PARAMETER["Central_Meridian", -99.0], +-- PARAMETER["Scale_Factor", 0.9996], +-- PARAMETER["Latitude_of_origin", 0.0], +-- UNIT["Meter", 1.0]]' +-- ); +-- +-- +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-- CREATE FEATURE SCHEMA +-- +-- *** ADAPTATION ALERT *** +-- The following schema is created using CREATE TABLE statements. +-- Furthermore, it DOES NOT create the GEOMETRY_COLUMNS metadata table. +-- Implementer's should replace the CREATE TABLES below with the mechanism +-- that it uses to create feature tables and the GEOMETRY_COLUMNS table/view +-- +--////////////////////////////////////////////////////////////////////////////// +-- +-------------------------------------------------------------------------------- +-- +-- Create feature tables +-- +-------------------------------------------------------------------------------- +-- +-- Lakes +-- +-- +-- +-- +CREATE TABLE lakes ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + shore POLYGON +); +-- +-- Road Segments +-- +-- +-- +-- +CREATE TABLE road_segments ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + aliases VARCHAR(64), + num_lanes INTEGER, + centerline LINESTRING +); +-- +-- Divided Routes +-- +-- +-- +-- +CREATE TABLE divided_routes ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + num_lanes INTEGER, + centerlines MULTILINESTRING +); +-- +-- Forests +-- +-- +-- +-- +CREATE TABLE forests ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + boundary MULTIPOLYGON +); +-- +-- Bridges +-- +-- +-- +-- +CREATE TABLE bridges ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + position POINT +); +-- +-- Streams +-- +-- +-- +-- +CREATE TABLE streams ( + fid INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(64), + centerline LINESTRING +); +-- +-- Buildings +-- +--*** ADAPTATION ALERT *** +-- A view could be used to provide the below semantics without multiple geometry +-- columns in a table. In other words, create two tables. One table would +-- contain the POINT position and the other would create the POLYGON footprint. +-- Then create a view with the semantics of the buildings table below. 
+--
+--
+--
+CREATE TABLE buildings (
+    fid        INTEGER NOT NULL PRIMARY KEY,
+    address    VARCHAR(64),
+    position   POINT,
+    footprint  POLYGON
+);
+--
+-- Ponds
+--
+--
+--
+--
+-- -- !#@ ADAPTATION BEGIN
+-- Fixes typo in the MULTIPOYLGON type
+-- ---------------------
+-- -- BEGIN ORIGINAL SQL
+-- ---------------------
+-- CREATE TABLE ponds (
+--     fid        INTEGER NOT NULL PRIMARY KEY,
+--     name       VARCHAR(64),
+--     type       VARCHAR(64),
+--     shores     MULTIPOYLGON
+-- );
+-- ---------------------
+-- -- END ORIGINAL SQL
+-- ---------------------
+-- -- BEGIN ADAPTED SQL
+-- ---------------------
+CREATE TABLE ponds (
+    fid        INTEGER NOT NULL PRIMARY KEY,
+    name       VARCHAR(64),
+    type       VARCHAR(64),
+    shores     MULTIPOLYGON
+);
+-- ---------------------
+-- -- END ADAPTED SQL
+-- ---------------------
+-- -- !#@ ADAPTATION END
+
+--
+-- Named Places
+--
+--
+--
+--
+CREATE TABLE named_places (
+    fid        INTEGER NOT NULL PRIMARY KEY,
+    name       VARCHAR(64),
+    boundary   POLYGON
+);
+--
+-- Map Neatline
+--
+--
+--
+--
+CREATE TABLE map_neatlines (
+    fid        INTEGER NOT NULL PRIMARY KEY,
+    neatline   POLYGON
+);
+--
+--
+--
+--//////////////////////////////////////////////////////////////////////////////
+--
+-- POPULATE GEOMETRY AND FEATURE TABLES
+--
+-- *** ADAPTATION ALERT ***
+-- This script DOES NOT make any inserts into a GEOMETRY_COLUMNS table/view.
+-- Implementers should insert whatever makes this happen in their implementation
+-- below. Furthermore, the inserts below may be replaced by whatever mechanism
+-- may be provided by implementers to insert rows in feature tables such that
+-- metadata (and other mechanisms) are updated properly.
+--
+--//////////////////////////////////////////////////////////////////////////////
+--
+--==============================================================================
+-- Lakes
+--
+-- We have one lake, Blue Lake. It is a polygon with a hole. Its geometry is
+-- described in WKT format as:
+--   'POLYGON( (52 18, 66 23, 73 9, 48 6, 52 18),
+--             (59 18, 67 18, 67 13, 59 13, 59 18) )'
+--==============================================================================
+--
+--
+-- -- !#@ ADAPTATION BEGIN
+-- Adds ST_ prefix to routing names
+-- ---------------------
+-- -- BEGIN ORIGINAL SQL
+-- ---------------------
+-- INSERT INTO lakes VALUES (101, 'BLUE LAKE',
+--    PolygonFromText('POLYGON((52 18,66 23,73 9,48 6,52 18),(59 18,67 18,67 13,59 13,59 18))', 101)
+-- );
+-- ---------------------
+-- -- END ORIGINAL SQL
+-- ---------------------
+-- -- BEGIN ADAPTED SQL
+-- ---------------------
+INSERT INTO lakes VALUES (101, 'BLUE LAKE',
+    ST_PolyFromText('POLYGON((52 18,66 23,73 9,48 6,52 18),(59 18,67 18,67 13,59 13,59 18))', 101)
+);
+-- ---------------------
+-- -- END ADAPTED SQL
+-- ---------------------
+-- -- !#@ ADAPTATION END
+--
+--==================
+-- Road segments
+--
+-- We have five road segments. Their geometries are all linestrings.
+-- The geometries are described in WKT format as: +-- name 'Route 5', fid 102 +-- 'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' +-- name 'Route 5', fid 103 +-- 'LINESTRING( 44 31, 56 34, 70 38 )' +-- name 'Route 5', fid 104 +-- 'LINESTRING( 70 38, 72 48 )' +-- name 'Main Street', fid 105 +-- 'LINESTRING( 70 38, 84 42 )' +-- name 'Dirt Road by Green Forest', fid 106 +-- 'LINESTRING( 28 26, 28 0 )' +-- +--================== +-- +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2, +-- LineStringFromText('LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4, +-- LineStringFromText('LINESTRING( 44 31, 56 34, 70 38 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2, +-- LineStringFromText('LINESTRING( 70 38, 72 48 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4, +-- LineStringFromText('LINESTRING( 70 38, 84 42 )' ,101) +-- ); +-- INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL, 1, +-- LineStringFromText('LINESTRING( 28 26, 28 0 )',101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO road_segments VALUES(102, 'Route 5', NULL, 2, + ST_LineFromText('LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101) +); +INSERT INTO road_segments VALUES(103, 'Route 5', 'Main Street', 4, + ST_LineFromText('LINESTRING( 44 31, 56 34, 70 38 )' ,101) +); +INSERT INTO road_segments VALUES(104, 'Route 5', NULL, 2, + ST_LineFromText('LINESTRING( 70 38, 72 48 )' ,101) +); +INSERT INTO road_segments VALUES(105, 'Main Street', NULL, 4, + ST_LineFromText('LINESTRING( 70 38, 84 42 )' ,101) +); +INSERT INTO road_segments VALUES(106, 'Dirt Road by Green Forest', NULL, 1, + ST_LineFromText('LINESTRING( 28 26, 28 0 )',101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- DividedRoutes +-- +-- We have one divided route. Its geometry is a multilinestring. +-- The geometry is described in WKT format as: +-- 'MULTILINESTRING( (10 48, 10 21, 10 0), (16 0, 10 23, 16 48) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO divided_routes VALUES(119, 'Route 75', 4, +-- MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO divided_routes VALUES(119, 'Route 75', 4, + ST_MLineFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Forests +-- +-- We have one forest. Its geometry is a multipolygon. 
+-- The geometry is described in WKT format as: +-- 'MULTIPOLYGON( ( (28 26, 28 0, 84 0, 84 42, 28 26), +-- (52 18, 66 23, 73 9, 48 6, 52 18) ), +-- ( (59 18, 67 18, 67 13, 59 13, 59 18) ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO forests VALUES(109, 'Green Forest', +-- MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO forests VALUES(109, 'Green Forest', + ST_MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- Bridges +-- +-- We have one bridge. Its geometry is a point. +-- The geometry is described in WKT format as: +-- 'POINT( 44 31 )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO bridges VALUES(110, 'Cam Bridge', +-- PointFromText('POINT( 44 31 )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO bridges VALUES(110, 'Cam Bridge', + ST_PointFromText('POINT( 44 31 )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Streams +-- +-- We have two streams. Their geometries are linestrings. +-- The geometries are described in WKT format as: +-- 'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )' +-- 'LINESTRING( 76 0, 78 4, 73 9 )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO streams VALUES(111, 'Cam Stream', +-- LineStringFromText('LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101) +-- ); +-- INSERT INTO streams VALUES(112, NULL, +-- LineStringFromText('LINESTRING( 76 0, 78 4, 73 9 )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO streams VALUES(111, 'Cam Stream', + ST_LineFromText('LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101) +); +INSERT INTO streams VALUES(112, NULL, + ST_LineFromText('LINESTRING( 76 0, 78 4, 73 9 )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Buildings +-- +-- We have two buildings. Their geometries are points and polygons. 
+-- The geometries are described in WKT format as: +-- address '123 Main Street' fid 113 +-- 'POINT( 52 30 )' and +-- 'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )' +-- address '215 Main Street' fid 114 +-- 'POINT( 64 33 )' and +-- 'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO buildings VALUES(113, '123 Main Street', +-- PointFromText('POINT( 52 30 )', 101), +-- PolygonFromText('POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101) +-- ); +-- INSERT INTO buildings VALUES(114, '215 Main Street', +-- PointFromText('POINT( 64 33 )', 101), +-- PolygonFromText('POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO buildings VALUES(113, '123 Main Street', + ST_PointFromText('POINT( 52 30 )', 101), + ST_PolyFromText('POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101) +); +INSERT INTO buildings VALUES(114, '215 Main Street', + ST_PointFromText('POINT( 64 33 )', 101), + ST_PolyFromText('POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Ponds +-- +-- We have one pond. Its geometry is a multipolygon. +-- The geometry is described in WKT format as: +-- 'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO ponds VALUES(120, NULL, 'Stock Pond', +-- MultiPolygonFromText('MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO ponds VALUES(120, NULL, 'Stock Pond', + ST_MPolyFromText('MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END + +-- +--================== +-- Named Places +-- +-- We have two named places. Their geometries are polygons. 
+-- The geometries are described in WKT format as: +-- name 'Ashton' fid 117 +-- 'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )' +-- address 'Goose Island' fid 118 +-- 'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO named_places VALUES(117, 'Ashton', +-- PolygonFromText('POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101) +-- ); +-- INSERT INTO named_places VALUES(118, 'Goose Island', +-- PolygonFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO named_places VALUES(117, 'Ashton', + ST_PolyFromText('POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101) +); +INSERT INTO named_places VALUES(118, 'Goose Island', + ST_PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +--================== +-- Map Neatlines +-- +-- We have one map neatline. Its geometry is a polygon. +-- The geometry is described in WKT format as: +-- 'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )' +-- +--================== +-- +-- -- !#@ ADAPTATION BEGIN +-- Adds ST_ prefix to routing names +-- --------------------- +-- -- BEGIN ORIGINAL SQL +-- --------------------- +-- INSERT INTO map_neatlines VALUES(115, +-- PolygonFromText('POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101) +-- ); +-- --------------------- +-- -- END ORIGINAL SQL +-- --------------------- +-- -- BEGIN ADAPTED SQL +-- --------------------- +INSERT INTO map_neatlines VALUES(115, + ST_PolyFromText('POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101) +); +-- --------------------- +-- -- END ADAPTED SQL +-- --------------------- +-- -- !#@ ADAPTATION END +-- +-- +-- +-- end sqltsch.sql \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec new file mode 100644 index 0000000000000..c9380fae2809e --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys-geo.csv-spec @@ -0,0 +1,15 @@ +// +// Geo-specific Sys Commands +// + +geoSysColumns +SYS COLUMNS TABLE LIKE 'geo'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i|BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i| NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |city |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |2 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |location_no_dv |114 |GEO_POINT |58 |16 |null |null |1 |null |null |114 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region |12 |KEYWORD |32766 |2147483647 |null |null 
|1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO
+x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |region_point |12 |KEYWORD |32766 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO
+x-pack_plugin_sql_qa_single-node_integTestCluster|null |geo |shape |114 |GEO_SHAPE |2147483647 |2147483647 |null |null |1 |null |null |114 |0 |null |6 |YES |null |null |null |null |NO |NO
+;
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
index db84a444f5794..d5a4cb436e6a5 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
@@ -63,6 +63,7 @@
import static org.elasticsearch.xpack.sql.stats.FeatureMetric.LOCAL;
import static org.elasticsearch.xpack.sql.stats.FeatureMetric.ORDERBY;
import static org.elasticsearch.xpack.sql.stats.FeatureMetric.WHERE;
+import static org.elasticsearch.xpack.sql.type.DataType.GEO_SHAPE;

/**
 * The verifier has the role of checking the analyzed tree for failures and build a list of failures following this check.
@@ -131,7 +132,6 @@ Collection<Failure> verify(LogicalPlan plan) {
        // start bottom-up
        plan.forEachUp(p -> {
-
            if (p.analyzed()) {
                return;
            }
@@ -236,6 +236,7 @@ Collection<Failure> verify(LogicalPlan plan) {
            checkForScoreInsideFunctions(p, localFailures);
            checkNestedUsedInGroupByOrHaving(p, localFailures);
+            checkForGeoFunctionsOnDocValues(p, localFailures);

            // everything checks out
            // mark the plan as analyzed
@@ -719,4 +720,33 @@ private static void checkNestedUsedInGroupByOrHaving(LogicalPlan p, Set<Failure>
                fail(nested.get(0), "HAVING isn't (yet) compatible with nested fields " + new AttributeSet(nested).names()));
        }
    }
+
+    /**
+     * Makes sure that geo shapes do not appear in filter, aggregation and sorting contexts
+     */
+    private static void checkForGeoFunctionsOnDocValues(LogicalPlan p, Set<Failure> localFailures) {
+
+        p.forEachDown(f -> {
+            f.condition().forEachUp(fa -> {
+                if (fa.field().getDataType() == GEO_SHAPE) {
+                    localFailures.add(fail(fa, "geo shapes cannot be used for filtering"));
+                }
+            }, FieldAttribute.class);
+        }, Filter.class);
+
+        // geo shape fields shouldn't be used in aggregates or having (yet)
+        p.forEachDown(a -> a.groupings().forEach(agg -> agg.forEachUp(fa -> {
+            if (fa.field().getDataType() == GEO_SHAPE) {
+                localFailures.add(fail(fa, "geo shapes cannot be used in grouping"));
+            }
+        }, FieldAttribute.class)), Aggregate.class);
+
+
+        // geo shape fields shouldn't be used in order by clauses
+        p.forEachDown(o -> o.order().forEach(agg -> agg.forEachUp(fa -> {
+            if (fa.field().getDataType() == GEO_SHAPE) {
+                localFailures.add(fail(fa, "geo shapes cannot be used for sorting"));
+            }
+        }, FieldAttribute.class)), OrderBy.class);
+    }
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java
index 652197473abf4..13294fbca221b 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java
@@ -5,13 +5,17 @@
 */
package org.elasticsearch.xpack.sql.execution.search.extractor;
+import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.document.DocumentField;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.util.DateUtils;

@@ -128,13 +132,31 @@ private Object unwrapMultiValue(Object values) {
            if (list.isEmpty()) {
                return null;
            } else {
-                if (arrayLeniency || list.size() == 1) {
-                    return unwrapMultiValue(list.get(0));
-                } else {
-                    throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName);
+                // let's first make sure that we are not dealing with a geo_point represented as an array
+                if (isGeoPointArray(list) == false) {
+                    if (list.size() == 1 || arrayLeniency) {
+                        return unwrapMultiValue(list.get(0));
+                    } else {
+                        throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName);
+                    }
                }
            }
        }
+        if (dataType == DataType.GEO_POINT) {
+            try {
+                GeoPoint geoPoint = GeoUtils.parseGeoPoint(values, true);
+                return new GeoShape(geoPoint.lon(), geoPoint.lat());
+            } catch (ElasticsearchParseException ex) {
+                throw new SqlIllegalArgumentException("Cannot parse geo_point value [{}] (returned by [{}])", values, fieldName);
+            }
+        }
+        if (dataType == DataType.GEO_SHAPE) {
+            try {
+                return new GeoShape(values);
+            } catch (IOException ex) {
+                throw new SqlIllegalArgumentException("Cannot read geo_shape value [{}] (returned by [{}])", values, fieldName);
+            }
+        }
        if (values instanceof Map) {
            throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName);
        }
@@ -149,6 +171,17 @@ private Object unwrapMultiValue(Object values) {
        throw new SqlIllegalArgumentException("Type {} (returned by [{}]) is not supported", values.getClass().getSimpleName(), fieldName);
    }

+    private boolean isGeoPointArray(List<?> list) {
+        if (dataType != DataType.GEO_POINT) {
+            return false;
+        }
+        // we expect the point in [lon lat] or [lon lat alt] formats
+        if (list.size() > 3 || list.size() < 1) {
+            return false;
+        }
+        return list.get(0) instanceof Number;
+    }
+
    @SuppressWarnings({ "unchecked", "rawtypes" })
    Object extractFromSource(Map<String, Object> map) {
        Object value = null;
@@ -173,7 +206,9 @@ Object extractFromSource(Map<String, Object> map) {
            if (node instanceof List) {
                List listOfValues = (List) node;
-                if (listOfValues.size() == 1 || arrayLeniency) {
+                // we can only apply this optimization before the last element of the path, since geo points are
+                // using arrays and we don't want to blindly ignore the second element of an array if arrayLeniency is enabled
+                if ((i < path.length - 1) && (listOfValues.size() == 1 || arrayLeniency)) {
                    // this is a List with a size of 1 e.g.: {"a" : [{"b" : "value"}]} meaning the JSON is a list with one element
                    // or a list of values with one element e.g.: {"a": {"b" : ["value"]}}
                    // in case of being lenient about arrays, just extract the first value in the array
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java index f6e1e3ad8be69..d382dad83a19d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java @@ -57,6 +57,11 @@ public static TypeResolution isNumericOrDateOrTime(Expression e, String operatio "date", "time", "datetime", "numeric"); } + + public static TypeResolution isGeo(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isGeo, operationName, paramOrd, "geo_point", "geo_shape"); + } + public static TypeResolution isExact(Expression e, String message) { if (e instanceof FieldAttribute) { EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index 0e9f07ef2132c..3a9ae06203476 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -46,6 +46,13 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StAswkt; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StGeometryType; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosql; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StX; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StY; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StZ; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; @@ -249,11 +256,23 @@ private void defineDefaultFunctions() { def(Space.class, Space::new, "SPACE"), def(Substring.class, Substring::new, "SUBSTRING"), def(UCase.class, UCase::new, "UCASE")); + // DataType conversion addToMap(def(Cast.class, Cast::new, "CAST", "CONVERT")); // Scalar "meta" functions addToMap(def(Database.class, Database::new, "DATABASE"), def(User.class, User::new, "USER")); + + // Geo Functions + addToMap(def(StAswkt.class, StAswkt::new, "ST_ASWKT", "ST_ASTEXT"), + def(StDistance.class, StDistance::new, "ST_DISTANCE"), + def(StWkttosql.class, StWkttosql::new, "ST_WKTTOSQL", "ST_GEOMFROMTEXT"), + def(StGeometryType.class, StGeometryType::new, "ST_GEOMETRYTYPE"), + def(StX.class, StX::new, "ST_X"), + def(StY.class, StY::new, "ST_Y"), + def(StZ.class, StZ::new, "ST_Z") + ); + // Special addToMap(def(Score.class, Score::new, "SCORE")); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index d14aeea507f47..0b9bbd1094a44 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java
@@ -11,6 +11,9 @@
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor;
+import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor;
+import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor;
+import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor;
@@ -98,6 +101,10 @@ public static List<NamedWriteableRegistry.Entry> getNamedWriteables() {
         entries.add(new Entry(Processor.class, LocateFunctionProcessor.NAME, LocateFunctionProcessor::new));
         entries.add(new Entry(Processor.class, ReplaceFunctionProcessor.NAME, ReplaceFunctionProcessor::new));
         entries.add(new Entry(Processor.class, SubstringFunctionProcessor.NAME, SubstringFunctionProcessor::new));
+        // geo
+        entries.add(new Entry(Processor.class, GeoProcessor.NAME, GeoProcessor::new));
+        entries.add(new Entry(Processor.class, StWkttosqlProcessor.NAME, StWkttosqlProcessor::new));
+        entries.add(new Entry(Processor.class, StDistanceProcessor.NAME, StDistanceProcessor::new));
 
         return entries;
     }
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java
new file mode 100644
index 0000000000000..519e4c0c74092
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessor.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression.function.scalar.geo;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+
+import java.io.IOException;
+import java.util.function.Function;
+
+public class GeoProcessor implements Processor {
+
+    private interface GeoShapeFunction<R> {
+        default R apply(Object o) {
+            if (o instanceof GeoShape) {
+                return doApply((GeoShape) o);
+            } else {
+                throw new SqlIllegalArgumentException("A geo_point or geo_shape is required; received [{}]", o);
+            }
+        }
+
+        R doApply(GeoShape s);
+    }
+
+    public enum GeoOperation {
+        ASWKT(GeoShape::toString),
+        GEOMETRY_TYPE(GeoShape::getGeometryType),
+        X(GeoShape::getX),
+        Y(GeoShape::getY),
+        Z(GeoShape::getZ);
+
+        private final Function<Object, Object> apply;
+
+        GeoOperation(GeoShapeFunction<Object> apply) {
+            this.apply = l -> l == null ?
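+            // Hedged sketch, not from the original patch: this lambda makes every GeoOperation
+            // null-safe before delegating to the type-checked GeoShapeFunction. Assuming the
+            // GeoShape(lon, lat) constructor added later in this patch, the expected behavior is:
+            //   GeoOperation.X.apply(new GeoShape(30, 10));  // -> 30.0 (longitude)
+            //   GeoOperation.X.apply(null);                  // -> null, no exception
+            //   GeoOperation.X.apply("POINT (30 10)");       // -> SqlIllegalArgumentException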
null : apply.apply(l); + } + + public final Object apply(Object l) { + return apply.apply(l); + } + } + + public static final String NAME = "geo"; + + private final GeoOperation processor; + + public GeoProcessor(GeoOperation processor) { + this.processor = processor; + } + + public GeoProcessor(StreamInput in) throws IOException { + processor = in.readEnum(GeoOperation.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(processor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + return processor.apply(input); + } + + GeoOperation processor() { + return processor; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + GeoProcessor other = (GeoProcessor) obj; + return processor == other.processor; + } + + @Override + public int hashCode() { + return processor.hashCode(); + } + + @Override + public String toString() { + return processor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java new file mode 100644 index 0000000000000..74b5c9646b853 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoShape.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.geo.GeometryParser; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; +import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.io.IOException; +import java.io.InputStream; +import java.text.ParseException; +import java.util.Objects; + +/** + * Wrapper class to represent a GeoShape in SQL + * + * It is required to override the XContent serialization. 
The ShapeBuilder serializes using GeoJSON by default, + * but in SQL we need the serialization to be WKT-based. + */ +public class GeoShape implements ToXContentFragment, NamedWriteable { + + public static final String NAME = "geo"; + + private final Geometry shape; + + public GeoShape(double lon, double lat) { + shape = new Point(lat, lon); + } + + public GeoShape(Object value) throws IOException { + try { + shape = parse(value); + } catch (ParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [" + value + "] as a geo_shape value", ex); + } + } + + public GeoShape(StreamInput in) throws IOException { + String value = in.readString(); + try { + shape = parse(value); + } catch (ParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [" + value + "] as a geo_shape value", ex); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(WellKnownText.toWKT(shape)); + } + + @Override + public String toString() { + return WellKnownText.toWKT(shape); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(WellKnownText.toWKT(shape)); + } + + public Geometry toGeometry() { + return shape; + } + + public Point firstPoint() { + return shape.visit(new GeometryVisitor() { + @Override + public Point visit(Circle circle) { + return new Point(circle.getLat(), circle.getLon(), circle.hasAlt() ? circle.getAlt() : Double.NaN); + } + + @Override + public Point visit(GeometryCollection collection) { + if (collection.size() > 0) { + return collection.get(0).visit(this); + } + return null; + } + + @Override + public Point visit(Line line) { + if (line.length() > 0) { + return new Point(line.getLat(0), line.getLon(0), line.hasAlt() ? line.getAlt(0) : Double.NaN); + } + return null; + } + + @Override + public Point visit(LinearRing ring) { + return visit((Line) ring); + } + + @Override + public Point visit(MultiLine multiLine) { + return visit((GeometryCollection) multiLine); + } + + @Override + public Point visit(MultiPoint multiPoint) { + return visit((GeometryCollection) multiPoint); + } + + @Override + public Point visit(MultiPolygon multiPolygon) { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Point visit(Point point) { + return point; + } + + @Override + public Point visit(Polygon polygon) { + return visit(polygon.getPolygon()); + } + + @Override + public Point visit(Rectangle rectangle) { + return new Point(rectangle.getMinLat(), rectangle.getMinLon(), rectangle.getMinAlt()); + } + }); + } + + public Double getX() { + Point firstPoint = firstPoint(); + return firstPoint != null ? firstPoint.getLon() : null; + } + + public Double getY() { + Point firstPoint = firstPoint(); + return firstPoint != null ? firstPoint.getLat() : null; + } + + public Double getZ() { + Point firstPoint = firstPoint(); + return firstPoint != null && firstPoint.hasAlt() ? 
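+        // Hedged note, not from the original patch: getX()/getY()/getZ() above and below follow
+        // the WKT axis order "POINT (lon lat [alt])", so X is longitude and Y is latitude:
+        //   new GeoShape(-71.34, 41.12).getX();  // -> -71.34 (lon)
+        //   new GeoShape(-71.34, 41.12).getY();  // -> 41.12 (lat)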
firstPoint.getAlt() : null; + } + + public String getGeometryType() { + return toGeometry().type().name(); + } + + public static double distance(GeoShape shape1, GeoShape shape2) { + if (shape1.shape instanceof Point == false) { + throw new SqlIllegalArgumentException("distance calculation is only supported for points; received [{}]", shape1); + } + if (shape2.shape instanceof Point == false) { + throw new SqlIllegalArgumentException("distance calculation is only supported for points; received [{}]", shape2); + } + double srcLat = ((Point) shape1.shape).getLat(); + double srcLon = ((Point) shape1.shape).getLon(); + double dstLat = ((Point) shape2.shape).getLat(); + double dstLon = ((Point) shape2.shape).getLon(); + return GeoUtils.arcDistance(srcLat, srcLon, dstLat, dstLon); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GeoShape geoShape = (GeoShape) o; + return shape.equals(geoShape.shape); + } + + @Override + public int hashCode() { + return Objects.hash(shape); + } + + @Override + public String getWriteableName() { + return NAME; + } + + private static Geometry parse(Object value) throws IOException, ParseException { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return GeometryParser.parse(parser, true, true, true); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java new file mode 100644 index 0000000000000..5c4b6edbe87eb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StAswkt.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
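+ *
+ * Hedged usage sketch (editorial addition, not from the original patch; exact output
+ * formatting comes from WellKnownText.toWKT and is assumed here), against the "geo"
+ * test index whose "shape" column appears in the csv-spec above:
+ *
+ *   SELECT ST_AsWKT(shape) FROM geo;   -- e.g. "POINT (10.0 20.0)"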
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_AsWKT function that takes a geometry and returns its Well Known Text representation + */ +public class StAswkt extends UnaryGeoFunction { + + public StAswkt(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StAswkt::new, field()); + } + + @Override + protected StAswkt replaceChild(Expression newChild) { + return new StAswkt(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.ASWKT; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java new file mode 100644 index 0000000000000..fd14e90dd9d93 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistance.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isGeo; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +/** + * Calculates the distance between two points + */ +public class StDistance extends BinaryOperator { + + private static final StDistanceFunction FUNCTION = new StDistanceFunction(); + + public StDistance(Source source, Expression source1, Expression source2) { + super(source, source1, source2, FUNCTION); + } + + @Override + protected StDistance replaceChildren(Expression newLeft, Expression newRight) { + return new StDistance(source(), newLeft, newRight); + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistance::new, left(), right()); + } + + @Override + public ScriptTemplate scriptWithField(FieldAttribute field) { + return new ScriptTemplate(processScript("{sql}.geoDocValue(doc,{})"), + paramsBuilder().variable(field.exactAttribute().name()).build(), + dataType()); + } + + @Override + protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { + return isGeo(e, sourceText(), paramOrdinal); + } + + @Override + 
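+    // Hedged note, not from the original patch: distance is symmetric,
+    //   ST_Distance(p1, p2) == ST_Distance(p2, p1)
+    // which presumably is what lets the planner call swapLeftAndRight() below to
+    // reorder operands (e.g. to move a field reference to the left-hand side):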
public StDistance swapLeftAndRight() { + return new StDistance(source(), right(), left()); + } + + @Override + protected Pipe makePipe() { + return new StDistancePipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right())); + } + + @Override + protected String scriptMethodName() { + return "stDistance"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java new file mode 100644 index 0000000000000..d1c15c1e2a1b2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceFunction.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.predicate.PredicateBiFunction; + +class StDistanceFunction implements PredicateBiFunction { + + @Override + public String name() { + return "ST_DISTANCE"; + } + + @Override + public String symbol() { + return "ST_DISTANCE"; + } + + @Override + public Double doApply(Object s1, Object s2) { + return StDistanceProcessor.process(s1, s2); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java new file mode 100644 index 0000000000000..c944266482651 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistancePipe.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipe; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Objects; + +public class StDistancePipe extends BinaryPipe { + + public StDistancePipe(Source source, Expression expression, Pipe left, Pipe right) { + super(source, expression, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StDistancePipe::new, expression(), left(), right()); + } + + @Override + protected BinaryPipe replaceChildren(Pipe left, Pipe right) { + return new StDistancePipe(source(), expression(), left, right); + } + + @Override + public StDistanceProcessor asProcessor() { + return new StDistanceProcessor(left().asProcessor(), right().asProcessor()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StDistancePipe other = (StDistancePipe) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java new file mode 100644 index 0000000000000..d6c9026b982d9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessor.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
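+ *
+ * Hedged sketch (editorial addition, not from the original patch): the static process()
+ * below is null-propagating and point-only, returning the arc distance in meters, roughly:
+ *
+ *   StDistanceProcessor.process(new GeoShape(0, 0), new GeoShape(0, 1));  // ~111195.0 (one degree of latitude)
+ *   StDistanceProcessor.process(null, new GeoShape(0, 1));                // null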
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.BinaryProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.Objects; + +public class StDistanceProcessor extends BinaryProcessor { + + public static final String NAME = "geo_distance"; + + public StDistanceProcessor(Processor source1, Processor source2) { + super(source1, source2); + } + + public StDistanceProcessor(StreamInput in) throws IOException { + super(in); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + + } + + @Override + public Object process(Object input) { + Object l = left().process(input); + checkParameter(l); + Object r = right().process(input); + checkParameter(r); + return doProcess(l, r); + } + + @Override + protected Object doProcess(Object left, Object right) { + return process(left, right); + } + + public static Double process(Object source1, Object source2) { + if (source1 == null || source2 == null) { + return null; + } + + if (source1 instanceof GeoShape == false) { + throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source1); + } + if (source2 instanceof GeoShape == false) { + throw new SqlIllegalArgumentException("A geo_point or geo_shape with type point is required; received [{}]", source2); + } + return GeoShape.distance((GeoShape) source1, (GeoShape) source2); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + StDistanceProcessor other = (StDistanceProcessor) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java new file mode 100644 index 0000000000000..15215bd9201de --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StGeometryType.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
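+ *
+ * Hedged usage sketch (editorial addition, not from the original patch); the returned
+ * names come from the geometry library's shape-type enum, which is assumed here:
+ *
+ *   SELECT ST_GeometryType(ST_WKTToSQL('POINT (10 20)'));          -- "POINT"
+ *   SELECT ST_GeometryType(ST_WKTToSQL('LINESTRING (0 0, 1 1)'));  -- "LINESTRING"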
+ */
+
+package org.elasticsearch.xpack.sql.expression.function.scalar.geo;
+
+
+import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation;
+import org.elasticsearch.xpack.sql.tree.NodeInfo;
+import org.elasticsearch.xpack.sql.tree.Source;
+import org.elasticsearch.xpack.sql.type.DataType;
+
+/**
+ * ST_GEOMETRY_TYPE function that takes a geometry and returns its type
+ */
+public class StGeometryType extends UnaryGeoFunction {
+
+    public StGeometryType(Source source, Expression field) {
+        super(source, field);
+    }
+
+    @Override
+    protected NodeInfo<StGeometryType> info() {
+        return NodeInfo.create(this, StGeometryType::new, field());
+    }
+
+    @Override
+    protected StGeometryType replaceChild(Expression newChild) {
+        return new StGeometryType(source(), newChild);
+    }
+
+    @Override
+    protected GeoOperation operation() {
+        return GeoOperation.GEOMETRY_TYPE;
+    }
+
+    @Override
+    public DataType dataType() {
+        return DataType.KEYWORD;
+    }
+
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java
new file mode 100644
index 0000000000000..3ebae55dec4f0
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosql.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.expression.function.scalar.geo;
+
+import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.Expressions;
+import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+import org.elasticsearch.xpack.sql.expression.gen.script.Scripts;
+import org.elasticsearch.xpack.sql.tree.NodeInfo;
+import org.elasticsearch.xpack.sql.tree.Source;
+import org.elasticsearch.xpack.sql.type.DataType;
+
+import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isString;
+
+/**
+ * Constructs geometric objects from their WKT representations
+ */
+public class StWkttosql extends UnaryScalarFunction {
+
+    public StWkttosql(Source source, Expression field) {
+        super(source, field);
+    }
+
+    @Override
+    protected StWkttosql replaceChild(Expression newChild) {
+        return new StWkttosql(source(), newChild);
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        if (field().dataType().isString()) {
+            return TypeResolution.TYPE_RESOLVED;
+        }
+        return isString(field(), functionName(), Expressions.ParamOrdinal.DEFAULT);
+    }
+
+    @Override
+    protected Processor makeProcessor() {
+        return StWkttosqlProcessor.INSTANCE;
+    }
+
+    @Override
+    public DataType dataType() {
+        return DataType.GEO_SHAPE;
+    }
+
+    @Override
+    protected NodeInfo<StWkttosql> info() {
+        return NodeInfo.create(this, StWkttosql::new, field());
+    }
+
+    @Override
+    public String processScript(String script) {
+        return Scripts.formatTemplate(Scripts.SQL_SCRIPTS + ".stWktToSql(" + script + ")");
+    }
+
+    @Override
+    public Object fold() {
+        return StWkttosqlProcessor.INSTANCE.process(field().fold());
+    }
+
+}
diff --git
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java new file mode 100644 index 0000000000000..f17ee2315befe --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; + +public class StWkttosqlProcessor implements Processor { + + static final StWkttosqlProcessor INSTANCE = new StWkttosqlProcessor(); + + public static final String NAME = "geo_wkttosql"; + + StWkttosqlProcessor() { + } + + public StWkttosqlProcessor(StreamInput in) throws IOException { + } + + @Override + public Object process(Object input) { + return StWkttosqlProcessor.apply(input); + } + + public static GeoShape apply(Object input) { + if (input == null) { + return null; + } + + if ((input instanceof String) == false) { + throw new SqlIllegalArgumentException("A string is required; received [{}]", input); + } + try { + return new GeoShape(input); + } catch (IOException | IllegalArgumentException | ElasticsearchParseException ex) { + throw new SqlIllegalArgumentException("Cannot parse [{}] as a geo_shape value", input); + } + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java new file mode 100644 index 0000000000000..f3cdafbe70dab --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StX.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
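+ *
+ * Hedged usage sketch (editorial addition, not from the original patch): for non-point
+ * geometries the value comes from GeoShape.firstPoint(), so the shape's first coordinate
+ * is returned:
+ *
+ *   SELECT ST_X(ST_WKTToSQL('POINT (-71.34 41.12)'));   -- -71.34
+ *   SELECT ST_X(ST_WKTToSQL('LINESTRING (3 4, 5 6)'));  -- 3.0 (first point of the line)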
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_X function that takes a geometry and returns the X coordinate of its first point + */ +public class StX extends UnaryGeoFunction { + + public StX(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StX::new, field()); + } + + @Override + protected StX replaceChild(Expression newChild) { + return new StX(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.X; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java new file mode 100644 index 0000000000000..0a9bc3aa1a40b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StY.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_Y function that takes a geometry and returns the Y coordinate of its first point + */ +public class StY extends UnaryGeoFunction { + + public StY(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StY::new, field()); + } + + @Override + protected StY replaceChild(Expression newChild) { + return new StY(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.Y; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java new file mode 100644 index 0000000000000..b6c0c9466bbe1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StZ.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
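+ *
+ * Hedged usage sketch (editorial addition, not from the original patch; assumes the WKT
+ * parser keeps z/altitude values as configured in GeoShape.parse):
+ *
+ *   SELECT ST_Z(ST_WKTToSQL('POINT (10 20 30)'));  -- 30.0
+ *   SELECT ST_Z(ST_WKTToSQL('POINT (10 20)'));     -- null (no altitude)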
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * ST_Z function that takes a geometry and returns the Z coordinate of its first point + */ +public class StZ extends UnaryGeoFunction { + + public StZ(Source source, Expression field) { + super(source, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StZ::new, field()); + } + + @Override + protected StZ replaceChild(Expression newChild) { + return new StZ(source(), newChild); + } + + @Override + protected GeoOperation operation() { + return GeoOperation.Z; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java new file mode 100644 index 0000000000000..50c05b7fbedb7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/UnaryGeoFunction.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isGeo; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +/** + * Base class for functions that get a single geo shape or geo point as an argument + */ +public abstract class UnaryGeoFunction extends UnaryScalarFunction { + + protected UnaryGeoFunction(Source source, Expression field) { + super(source, field); + } + + @Override + public Object fold() { + return operation().apply(field().fold()); + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + return isGeo(field(), operation().toString(), Expressions.ParamOrdinal.DEFAULT); + } + + @Override + protected Processor makeProcessor() { + return new GeoProcessor(operation()); + } + + protected abstract GeoProcessor.GeoOperation operation(); + + @Override + public ScriptTemplate scriptWithField(FieldAttribute field) { + //TODO change this to use _source instead of the exact form (aka field.keyword for geo shape fields) + return new ScriptTemplate(processScript("{sql}.geoDocValue(doc,{})"), + 
paramsBuilder().variable(field.exactAttribute().name()).build(), + dataType()); + } + + @Override + public String processScript(String template) { + // basically, transform the script to InternalSqlScriptUtils.[function_name](other_function_or_field_name) + return super.processScript( + format(Locale.ROOT, "{sql}.%s(%s)", + StringUtils.underscoreToLowerCamelCase("ST_" + operation().name()), + template)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + UnaryGeoFunction other = (UnaryGeoFunction) obj; + return Objects.equals(other.field(), field()); + } + + @Override + public int hashCode() { + return Objects.hash(field()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 6a4ec411fe1cf..d39aec4423684 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; @@ -12,6 +13,10 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor.NonIsoDateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; @@ -73,7 +78,7 @@ public static Object docValue(Map> doc, String fi } return null; } - + public static boolean nullSafeFilter(Boolean filter) { return filter == null ? 
false : filter.booleanValue(); } @@ -109,7 +114,7 @@ public static Boolean neq(Object left, Object right) { public static Boolean lt(Object left, Object right) { return BinaryComparisonOperation.LT.apply(left, right); } - + public static Boolean lte(Object left, Object right) { return BinaryComparisonOperation.LTE.apply(left, right); } @@ -125,7 +130,7 @@ public static Boolean gte(Object left, Object right) { public static Boolean and(Boolean left, Boolean right) { return BinaryLogicOperation.AND.apply(left, right); } - + public static Boolean or(Boolean left, Boolean right) { return BinaryLogicOperation.OR.apply(left, right); } @@ -328,14 +333,14 @@ public static Integer dateTimeChrono(Object dateTime, String tzId, String chrono } return DateTimeFunction.dateTimeChrono(asDateTime(dateTime), tzId, chronoName); } - + public static String dayName(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; } return NameExtractor.DAY_NAME.extract(asDateTime(dateTime), tzId); } - + public static Integer dayOfWeek(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; @@ -349,7 +354,7 @@ public static String monthName(Object dateTime, String tzId) { } return NameExtractor.MONTH_NAME.extract(asDateTime(dateTime), tzId); } - + public static Integer quarter(Object dateTime, String tzId) { if (dateTime == null || tzId == null) { return null; @@ -390,7 +395,7 @@ private static Object asDateTime(Object dateTime, boolean lenient) { } return dateTime; } - + public static IntervalDayTime intervalDayTime(String text, String typeName) { if (text == null || typeName == null) { return null; @@ -416,7 +421,7 @@ public static OffsetTime asTime(String time) { public static Integer ascii(String s) { return (Integer) StringOperation.ASCII.apply(s); } - + public static Integer bitLength(String s) { return (Integer) StringOperation.BIT_LENGTH.apply(s); } @@ -428,7 +433,7 @@ public static String character(Number n) { public static Integer charLength(String s) { return (Integer) StringOperation.CHAR_LENGTH.apply(s); } - + public static String concat(String s1, String s2) { return (String) ConcatFunctionProcessor.process(s1, s2); } @@ -452,7 +457,7 @@ public static Integer length(String s) { public static Integer locate(String s1, String s2) { return locate(s1, s2, null); } - + public static Integer locate(String s1, String s2, Number pos) { return LocateFunctionProcessor.doProcess(s1, s2, pos); } @@ -460,7 +465,7 @@ public static Integer locate(String s1, String s2, Number pos) { public static String ltrim(String s) { return (String) StringOperation.LTRIM.apply(s); } - + public static Integer octetLength(String s) { return (Integer) StringOperation.OCTET_LENGTH.apply(s); } @@ -468,15 +473,15 @@ public static Integer octetLength(String s) { public static Integer position(String s1, String s2) { return (Integer) BinaryStringStringOperation.POSITION.apply(s1, s2); } - + public static String repeat(String s, Number count) { return BinaryStringNumericOperation.REPEAT.apply(s, count); } - + public static String replace(String s1, String s2, String s3) { return (String) ReplaceFunctionProcessor.doProcess(s1, s2, s3); } - + public static String right(String s, Number count) { return BinaryStringNumericOperation.RIGHT.apply(s, count); } @@ -496,7 +501,47 @@ public static String substring(String s, Number start, Number length) { public static String ucase(String s) { return (String) StringOperation.UCASE.apply(s); } - + + public static String stAswkt(Object v) { + return 
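+        // Hedged note, not from the original patch: these static helpers back the Painless
+        // whitelist entries added to sql_whitelist.txt further below; a pushed-down projection
+        // such as SELECT ST_AsWKT(shape) compiles to a script along the lines of
+        //   InternalSqlScriptUtils.stAswkt(InternalSqlScriptUtils.geoDocValue(doc, params.v0))
+        // where the exact parameter wiring is assumed.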
GeoProcessor.GeoOperation.ASWKT.apply(v).toString(); + } + + public static GeoShape stWktToSql(String wktString) { + return StWkttosqlProcessor.apply(wktString); + } + + public static Double stDistance(Object v1, Object v2) { + return StDistanceProcessor.process(v1, v2); + } + + public static String stGeometryType(Object g) { + return (String) GeoProcessor.GeoOperation.GEOMETRY_TYPE.apply(g); + } + + public static Double stX(Object g) { + return (Double) GeoProcessor.GeoOperation.X.apply(g); + } + + public static Double stY(Object g) { + return (Double) GeoProcessor.GeoOperation.Y.apply(g); + } + + public static Double stZ(Object g) { + return (Double) GeoProcessor.GeoOperation.Z.apply(g); + } + + // processes doc value as a geometry + public static GeoShape geoDocValue(Map> doc, String fieldName) { + Object obj = docValue(doc, fieldName); + if (obj != null) { + if (obj instanceof GeoPoint) { + return new GeoShape(((GeoPoint) obj).getLon(), ((GeoPoint) obj).getLat()); + } + // TODO: Add support for geo_shapes when it is there + } + return null; + } + // // Casting // diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java index b24ec56727d64..223e22b2a33ba 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime; import org.elasticsearch.xpack.sql.expression.literal.IntervalYearMonth; import org.elasticsearch.xpack.sql.type.DataType; @@ -95,6 +96,13 @@ default ScriptTemplate scriptWithFoldable(Expression foldable) { dataType()); } + if (fold instanceof GeoShape) { + GeoShape geoShape = (GeoShape) fold; + return new ScriptTemplate(processScript("{sql}.stWktToSql({})"), + paramsBuilder().variable(geoShape.toString()).build(), + dataType()); + } + return new ScriptTemplate(processScript("{}"), paramsBuilder().variable(fold).build(), dataType()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java index b06a1fb887433..ed7dc9da77543 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Intervals.java @@ -408,5 +408,4 @@ public static TemporalAmount negate(TemporalAmount interval) { public static TemporalAmount parseInterval(Source source, String value, DataType intervalType) { return PARSERS.get(intervalType).parse(source, value); } - } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java index 333ba3f11c0b1..d6bdeeb0fe46b 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/Literals.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.literal; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import java.util.ArrayList; import java.util.Collection; @@ -30,6 +31,7 @@ public static Collection getNamedWriteab entries.add(new NamedWriteableRegistry.Entry(IntervalDayTime.class, IntervalDayTime.NAME, IntervalDayTime::new)); entries.add(new NamedWriteableRegistry.Entry(IntervalYearMonth.class, IntervalYearMonth.NAME, IntervalYearMonth::new)); + entries.add(new NamedWriteableRegistry.Entry(GeoShape.class, GeoShape.NAME, GeoShape::new)); return entries; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 8495b0269eb84..7e5516810d92a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.sql.planner; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Attribute; @@ -38,6 +40,8 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.expression.literal.Intervals; import org.elasticsearch.xpack.sql.expression.predicate.Range; @@ -85,6 +89,7 @@ import org.elasticsearch.xpack.sql.querydsl.agg.TopHitsAgg; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.GeoDistanceQuery; import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; import org.elasticsearch.xpack.sql.querydsl.query.MultiMatchQuery; import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; @@ -656,6 +661,24 @@ private static Query translateQuery(BinaryComparison bc) { Object value = valueOf(bc.right()); String format = dateFormat(bc.left()); + // Possible geo optimization + if (bc.left() instanceof StDistance && value instanceof Number) { + if (bc instanceof LessThan || bc instanceof LessThanOrEqual) { + // Special case for ST_Distance translatable into geo_distance query + StDistance stDistance = (StDistance) bc.left(); + if (stDistance.left() instanceof FieldAttribute && stDistance.right().foldable()) { + Object geoShape = valueOf(stDistance.right()); + if (geoShape instanceof GeoShape) { + Geometry geometry = ((GeoShape) geoShape).toGeometry(); + if (geometry instanceof Point) { + String field = nameOf(stDistance.left()); + return new GeoDistanceQuery(source, field, ((Number) value).doubleValue(), 
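+                        // Hedged note, not from the original patch: this rewrites a filter like
+                        //   WHERE ST_Distance(location, ST_WKTToSQL('POINT (10 20)')) <= 1000
+                        // into a server-side geo_distance query (radius in meters) instead of a
+                        // script filter; note that < and <= both map to the same query here.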
+ ((Point) geometry).getLat(), ((Point) geometry).getLon()); + } + } + } + } + } if (bc instanceof GreaterThan) { return new RangeQuery(source, name, value, false, null, false, format); } @@ -954,6 +977,9 @@ public QueryTranslation translate(Expression exp, boolean onAggs) { protected static Query handleQuery(ScalarFunction sf, Expression field, Supplier query) { Query q = query.get(); + if (field instanceof StDistance && q instanceof GeoDistanceQuery) { + return wrapIfNested(q, ((StDistance) field).left()); + } if (field instanceof FieldAttribute) { return wrapIfNested(q, field); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java new file mode 100644 index 0000000000000..dd1a1171c1603 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/GeoDistanceQuery.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Objects; + +public class GeoDistanceQuery extends LeafQuery { + + private final String field; + private final double lat; + private final double lon; + private final double distance; + + public GeoDistanceQuery(Source source, String field, double distance, double lat, double lon) { + super(source); + this.field = field; + this.distance = distance; + this.lat = lat; + this.lon = lon; + } + + public String field() { + return field; + } + + public double lat() { + return lat; + } + + public double lon() { + return lon; + } + + public double distance() { + return distance; + } + + @Override + public QueryBuilder asBuilder() { + return QueryBuilders.geoDistanceQuery(field).distance(distance, DistanceUnit.METERS).point(lat, lon); + } + + @Override + public int hashCode() { + return Objects.hash(field, distance, lat, lon); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GeoDistanceQuery other = (GeoDistanceQuery) obj; + return Objects.equals(field, other.field) && + Objects.equals(distance, other.distance) && + Objects.equals(lat, other.lat) && + Objects.equals(lon, other.lon); + } + + @Override + protected String innerToString() { + return field + ":" + "(" + distance + "," + "(" + lat + ", " + lon + "))"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 1f04e7c8e1982..76f2436e8629c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -53,6 +53,9 @@ public enum DataType { // // specialized types // + GEO_SHAPE( ExtTypes.GEOMETRY, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, false, false, false), + // display size = 2 doubles + len("POINT( )") + GEO_POINT( ExtTypes.GEOMETRY, Double.BYTES*2, Integer.MAX_VALUE, 25 * 
2 + 8, false, false, false), // IP can be v4 or v6. The latter has 2^128 addresses or 340,282,366,920,938,463,463,374,607,431,768,211,456 // aka 39 chars IP( "ip", JDBCType.VARCHAR, 39, 39, 0, false, false, true), @@ -251,6 +254,10 @@ public boolean isPrimitive() { return this != OBJECT && this != NESTED && this != UNSUPPORTED; } + public boolean isGeo() { + return this == GEO_POINT || this == GEO_SHAPE; + } + public boolean isDateBased() { return this == DATE || this == DATETIME; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index dcd6a1b35a13e..3f985ae4e3b6e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.type; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.literal.Interval; import java.time.OffsetTime; @@ -81,6 +82,9 @@ public static DataType fromJava(Object value) { if (value instanceof Interval) { return ((Interval) value).dataType(); } + if (value instanceof GeoShape) { + return DataType.GEO_SHAPE; + } throw new SqlIllegalArgumentException("No idea what's the DataType for {}", value.getClass()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java index 1ad9dd92abfec..2c07be3eb620d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/ExtTypes.java @@ -27,7 +27,8 @@ enum ExtTypes implements SQLType { INTERVAL_DAY_TO_SECOND(110), INTERVAL_HOUR_TO_MINUTE(111), INTERVAL_HOUR_TO_SECOND(112), - INTERVAL_MINUTE_TO_SECOND(113); + INTERVAL_MINUTE_TO_SECOND(113), + GEOMETRY(114); private final Integer type; diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 4ac4632572ca0..6d24ea79f2bc2 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -4,7 +4,14 @@ # you may not use this file except in compliance with the Elastic License. 
# -# This file contains a whitelist for SQL specific utilities available inside SQL scripting +# This file contains a whitelist for SQL specific utilities and classes available inside SQL scripting + +#### Classes + +class org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape { + +} + class org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime { } @@ -137,7 +144,19 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS String space(Number) String substring(String, Number, Number) String ucase(String) - + +# +# Geo Functions +# + GeoShape geoDocValue(java.util.Map, String) + String stAswkt(Object) + Double stDistance(Object, Object) + String stGeometryType(Object) + GeoShape stWktToSql(String) + Double stX(Object) + Double stY(Object) + Double stZ(Object) + # # Casting # diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java index bc7b85b5392e9..b36111ffac3bb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -158,7 +158,7 @@ public void testDottedFieldPathTypo() { public void testStarExpansionExcludesObjectAndUnsupportedTypes() { LogicalPlan plan = plan("SELECT * FROM test"); List list = ((Project) plan).projections(); - assertThat(list, hasSize(8)); + assertThat(list, hasSize(10)); List names = Expressions.names(list); assertThat(names, not(hasItem("some"))); assertThat(names, not(hasItem("some.dotted"))); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index dcf8dad5ecb79..609e6a52c3e0f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -773,4 +773,28 @@ public void testAggregateAliasInFilter() { public void testProjectUnresolvedAliasInFilter() { assertEquals("1:8: Unknown column [tni]", error("SELECT tni AS i FROM test WHERE i > 10 GROUP BY i")); } + + public void testGeoShapeInWhereClause() { + assertEquals("1:49: geo shapes cannot be used for filtering", + error("SELECT ST_AsWKT(shape) FROM test WHERE ST_AsWKT(shape) = 'point (10 20)'")); + + // We get only one message back because the messages are grouped by the node that caused the issue + assertEquals("1:46: geo shapes cannot be used for filtering", + error("SELECT MAX(ST_X(shape)) FROM test WHERE ST_Y(shape) > 10 GROUP BY ST_GEOMETRYTYPE(shape) ORDER BY ST_ASWKT(shape)")); + } + + public void testGeoShapeInGroupBy() { + assertEquals("1:44: geo shapes cannot be used in grouping", + error("SELECT ST_X(shape) FROM test GROUP BY ST_X(shape)")); + } + + public void testGeoShapeInOrderBy() { + assertEquals("1:44: geo shapes cannot be used for sorting", + error("SELECT ST_X(shape) FROM test ORDER BY ST_Z(shape)")); + } + + public void testGeoShapeInSelect() { + accept("SELECT ST_X(shape) FROM test"); + } + } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 973d5b50fad00..50a3b185dba86 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -451,6 +452,125 @@ public void testObjectsForSourceValue() throws IOException { assertThat(ex.getMessage(), is("Objects (returned by [" + fieldName + "]) are not supported")); } + public void testGeoShapeExtraction() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false); + Map map = new HashMap<>(); + map.put(fieldName, "POINT (1 2)"); + assertEquals(new GeoShape(1, 2), fe.extractFromSource(map)); + + map = new HashMap<>(); + assertNull(fe.extractFromSource(map)); + } + + + public void testMultipleGeoShapeExtraction() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false); + Map map = new HashMap<>(); + map.put(fieldName, "POINT (1 2)"); + assertEquals(new GeoShape(1, 2), fe.extractFromSource(map)); + + map = new HashMap<>(); + assertNull(fe.extractFromSource(map)); + + Map map2 = new HashMap<>(); + map2.put(fieldName, Arrays.asList("POINT (1 2)", "POINT (3 4)")); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map2)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_SHAPE, UTC, false, true); + assertEquals(new GeoShape(1, 2), lenientFe.extractFromSource(map2)); + } + + public void testGeoPointExtractionFromSource() throws IOException { + int layers = randomIntBetween(1, 3); + String pathCombined = ""; + double lat = randomDoubleBetween(-90, 90, true); + double lon = randomDoubleBetween(-180, 180, true); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + boolean[] arrayWrap = new boolean[layers - 1]; + source.startObject(); { + for (int i = 0; i < layers - 1; i++) { + arrayWrap[i] = randomBoolean(); + String name = randomAlphaOfLength(10); + source.field(name); + if (arrayWrap[i]) { + source.startArray(); + } + source.startObject(); + pathCombined = pathCombined + name + "."; + } + String name = randomAlphaOfLength(10); + pathCombined = pathCombined + name; + source.field(name, randomPoint(lat, lon)); + for (int i = layers - 2; i >= 0; i--) { + source.endObject(); + if (arrayWrap[i]) { + source.endArray(); + } + } + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + + FieldHitExtractor fe = new FieldHitExtractor(pathCombined, DataType.GEO_POINT, UTC, false); + assertEquals(new GeoShape(lon, lat), fe.extract(hit)); + } + + public void testMultipleGeoPointExtractionFromSource() throws IOException { + double lat = randomDoubleBetween(-90, 90, true); + double lon = randomDoubleBetween(-180, 180, true); + SearchHit hit = new 
SearchHit(1); + String fieldName = randomAlphaOfLength(5); + int arraySize = randomIntBetween(2, 4); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); { + source.startArray(fieldName); + source.value(randomPoint(lat, lon)); + for (int i = 1; i < arraySize; i++) { + source.value(randomPoint(lat, lon)); + } + source.endArray(); + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, false); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, false, true); + assertEquals(new GeoShape(lon, lat), lenientFe.extract(hit)); + } + + public void testGeoPointExtractionFromDocValues() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, singletonList("2, 1")); + hit.fields(singletonMap(fieldName, field)); + assertEquals(new GeoShape(1, 2), fe.extract(hit)); + hit = new SearchHit(1); + assertNull(fe.extract(hit)); + } + + public void testGeoPointExtractionFromMultipleDocValues() { + String fieldName = randomAlphaOfLength(5); + SearchHit hit = new SearchHit(1); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true); + + hit.fields(singletonMap(fieldName, new DocumentField(fieldName, Arrays.asList("2,1", "3,4")))); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + + FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, DataType.GEO_POINT, UTC, true, true); + assertEquals(new GeoShape(1, 2), lenientFe.extract(hit)); + } + private FieldHitExtractor getFieldHitExtractor(String fieldName, boolean useDocValue) { return new FieldHitExtractor(fieldName, null, UTC, useDocValue); } @@ -471,4 +591,18 @@ private Object randomNonNullValue() { ESTestCase::randomDouble)); return value.get(); } + + private Object randomPoint(double lat, double lon) { + Supplier value = randomFrom(Arrays.asList( + () -> lat + "," + lon, + () -> Arrays.asList(lon, lat), + () -> { + Map map1 = new HashMap<>(); + map1.put("lat", lat); + map1.put("lon", lon); + return map1; + } + )); + return value.get(); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java new file mode 100644 index 0000000000000..07cc6171cf013 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/GeoProcessorTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor.GeoOperation; + +import java.io.IOException; + +public class GeoProcessorTests extends AbstractWireSerializingTestCase { + public static GeoProcessor randomGeoProcessor() { + return new GeoProcessor(randomFrom(GeoOperation.values())); + } + + @Override + protected GeoProcessor createTestInstance() { + return randomGeoProcessor(); + } + + @Override + protected Reader instanceReader() { + return GeoProcessor::new; + } + + @Override + protected GeoProcessor mutateInstance(GeoProcessor instance) throws IOException { + return new GeoProcessor(randomValueOtherThan(instance.processor(), () -> randomFrom(GeoOperation.values()))); + } + + public void testApplyAsWKT() throws Exception { + assertEquals("point (10.0 20.0)", new GeoProcessor(GeoOperation.ASWKT).process(new GeoShape(10, 20))); + assertEquals("point (10.0 20.0)", new GeoProcessor(GeoOperation.ASWKT).process(new GeoShape("POINT (10 20)"))); + } + + public void testApplyGeometryType() throws Exception { + assertEquals("POINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape(10, 20))); + assertEquals("POINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("POINT (10 20)"))); + assertEquals("MULTIPOINT", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals("LINESTRING", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals("POLYGON", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals("MULTILINESTRING", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + assertEquals("MULTIPOLYGON", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + assertEquals("ENVELOPE", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + assertEquals("GEOMETRYCOLLECTION", new GeoProcessor(GeoOperation.GEOMETRY_TYPE).process( + new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + } + + + public void testApplyGetXYZ() throws Exception { + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape(10, 20))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape(10, 20))); + assertNull(new GeoProcessor(GeoOperation.Z).process(new GeoShape(10, 20))); + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("POINT (10 20)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("POINT (10 20)"))); + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(30.0, new GeoProcessor(GeoOperation.Z).process(new GeoShape("POINT (10 20 30)"))); + assertEquals(2.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("multipoint (2.0 1.0)"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("multipoint (2.0 1.0)"))); + 
assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("LINESTRING (3.0 1.0, 4.0 2.0)"))); + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process( + new GeoShape("multilinestring ((3.0 1.0, 4.0 2.0), (2.0 1.0, 5.0 6.0))"))); + // minX minX, maxX, maxY, minY + assertEquals(10.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + // minY minX, maxX, maxY, minY + assertEquals(30.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("bbox (10.0, 20.0, 40.0, 30.0)"))); + assertEquals(20.0, new GeoProcessor(GeoOperation.X).process( + new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + assertEquals(10.0, new GeoProcessor(GeoOperation.Y).process( + new GeoShape("geometrycollection (point (20.0 10.0),point (1.0 2.0))"))); + } + + public void testApplyGetXYZToPolygons() throws Exception { + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertNull(new GeoProcessor(GeoOperation.Z).process(new GeoShape("polygon ((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0))"))); + assertEquals(5.0, new GeoProcessor(GeoOperation.Z).process( + new GeoShape("polygon ((3.0 1.0 5.0, 4.0 2.0 6.0, 4.0 3.0 7.0, 3.0 1.0 5.0))"))); + assertEquals(3.0, new GeoProcessor(GeoOperation.X).process(new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + assertEquals(1.0, new GeoProcessor(GeoOperation.Y).process(new GeoShape("multipolygon (((3.0 1.0, 4.0 2.0, 4.0 3.0, 3.0 1.0)))"))); + } + + public void testApplyNull() { + for (GeoOperation op : GeoOperation.values()) { + GeoProcessor proc = new GeoProcessor(op); + assertNull(proc.process(null)); + } + } + + public void testTypeCheck() { + GeoProcessor proc = new GeoProcessor(GeoOperation.ASWKT); + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("string")); + assertEquals("A geo_point or geo_shape is required; received [string]", siae.getMessage()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java new file mode 100644 index 0000000000000..9f78f8b3df43b --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StDistanceProcessorTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.gen.processor.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils.l; +import static org.elasticsearch.xpack.sql.tree.Source.EMPTY; +import static org.hamcrest.Matchers.instanceOf; + +public class StDistanceProcessorTests extends AbstractWireSerializingTestCase { + + public StDistanceProcessor createTestInstance() { + return new StDistanceProcessor( + constantPoint(randomDoubleBetween(-180, 180, true), randomDoubleBetween(-90, 90, true)), + constantPoint(randomDoubleBetween(-180, 180, true), randomDoubleBetween(-90, 90, true)) + ); + } + + public static Processor constantPoint(double lon, double lat) { + return new ChainingProcessor(new ConstantProcessor("point (" + lon + " " + lat + ")"), StWkttosqlProcessor.INSTANCE); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testApply() { + StDistanceProcessor proc = new StDistanceProcessor(constantPoint(10, 20), constantPoint(30, 40)); + Object result = proc.process(null); + assertThat(result, instanceOf(Double.class)); + assertEquals(GeoUtils.arcDistance(20, 10, 40, 30), (double) result, 0.000001); + } + + public void testNullHandling() { + assertNull(new StDistance(EMPTY, l(new GeoShape(1, 2)), l(null)).makePipe().asProcessor().process(null)); + assertNull(new StDistance(EMPTY, l(null), l(new GeoShape(1, 2))).makePipe().asProcessor().process(null)); + } + + public void testTypeCheck() { + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, + () -> new StDistance(EMPTY, l("foo"), l(new GeoShape(1, 2))).makePipe().asProcessor().process(null)); + assertEquals("A geo_point or geo_shape with type point is required; received [foo]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, + () -> new StDistance(EMPTY, l(new GeoShape(1, 2)), l("bar")).makePipe().asProcessor().process(null)); + assertEquals("A geo_point or geo_shape with type point is required; received [bar]", siae.getMessage()); + } + + @Override + protected Writeable.Reader instanceReader() { + return StDistanceProcessor::new; + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java new file mode 100644 index 0000000000000..fc7b33ae905d7 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/geo/StWkttosqlProcessorTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.geo; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import static org.hamcrest.Matchers.instanceOf; + +public class StWkttosqlProcessorTests extends ESTestCase { + public static StWkttosqlProcessor randomStWkttosqlProcessor() { + return new StWkttosqlProcessor(); + } + + public void testApply() { + StWkttosqlProcessor proc = new StWkttosqlProcessor(); + assertNull(proc.process(null)); + Object result = proc.process("POINT (10 20)"); + assertThat(result, instanceOf(GeoShape.class)); + GeoShape geoShape = (GeoShape) result; + assertEquals("point (10.0 20.0)", geoShape.toString()); + } + + public void testTypeCheck() { + StWkttosqlProcessor procPoint = new StWkttosqlProcessor(); + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process(42)); + assertEquals("A string is required; received [42]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("some random string")); + assertEquals("Cannot parse [some random string] as a geo_shape value", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("point (foo bar)")); + assertEquals("Cannot parse [point (foo bar)] as a geo_shape value", siae.getMessage()); + + + siae = expectThrows(SqlIllegalArgumentException.class, () -> procPoint.process("point (10 10")); + assertEquals("Cannot parse [point (10 10] as a geo_shape value", siae.getMessage()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index cf6530e2188ff..93f6515f71062 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.IsoWeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistance; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; @@ -764,6 +765,15 @@ public void testLiteralsOnTheRight() { assertEquals(FIVE, nullEquals.right()); } + public void testLiteralsOnTheRightInStDistance() { + Alias a = new Alias(EMPTY, "a", L(10)); + Expression result = new BooleanLiteralsOnTheRight().rule(new StDistance(EMPTY, FIVE, a)); + assertTrue(result instanceof StDistance); + StDistance sd = (StDistance) result; + assertEquals(a, sd.left()); + assertEquals(FIVE, sd.right()); + } + public void testBoolSimplifyNotIsNullAndNotIsNotNull() { BooleanSimplification simplification = new BooleanSimplification(); assertTrue(simplification.rule(new Not(EMPTY, new IsNull(EMPTY, ONE))) instanceof IsNotNull); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index f3f2d9569c53f..9c8c32689b70e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -57,7 +57,7 @@ public void testSysColumns() { SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values()))); // nested fields are ignored - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -144,7 +144,7 @@ public void testSysColumnsInOdbcMode() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.ODBC); - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -233,7 +233,7 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, nullable(row).getClass()); assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); - + row = rows.get(9); assertEquals("some.ambiguous", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); @@ -279,7 +279,7 @@ public void testSysColumnsInJdbcMode() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, Mode.JDBC); - assertEquals(13, rows.size()); + assertEquals(15, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -463,7 +463,7 @@ public void testSysColumnsNoArg() throws Exception { public void testSysColumnsWithCatalogWildcard() throws Exception { executeCommand("SYS COLUMNS CATALOG 'cluster' TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -476,7 +476,7 @@ public void testSysColumnsWithCatalogWildcard() throws Exception { public void testSysColumnsWithMissingCatalog() throws Exception { executeCommand("SYS COLUMNS TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -489,7 +489,7 @@ public void testSysColumnsWithMissingCatalog() throws Exception { public void testSysColumnsWithNullCatalog() throws Exception { executeCommand("SYS COLUMNS CATALOG ? 
TABLE LIKE 'test' LIKE '%'", singletonList(new SqlTypedParamValue("keyword", null)), r -> { - assertEquals(13, r.size()); + assertEquals(14, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); assertEquals("test", r.column(2)); assertEquals("bool", r.column(3)); @@ -529,4 +529,4 @@ private Tuple sql(String sql, List para SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); return new Tuple<>(cmd, session); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 4a8da68a1d51e..805268dd5b687 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -48,7 +48,7 @@ public void testSysTypes() { "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", - "UNSUPPORTED", "OBJECT", "NESTED"); + "GEO_SHAPE", "GEO_POINT", "UNSUPPORTED", "OBJECT", "NESTED"); cmd.execute(null, wrap(r -> { assertEquals(19, r.columnCount()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 0543e65d4ae46..693840bd65c34 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateHistogram; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.GeoDistanceQuery; import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; import org.elasticsearch.xpack.sql.querydsl.query.Query; import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; @@ -65,6 +66,7 @@ import static org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation.PI; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class QueryTranslatorTests extends ESTestCase { @@ -496,7 +498,7 @@ public void testTranslateMathFunction_HavingClause_Painless() { assertNull(translation.query); AggFilter aggFilter = translation.aggFilter; assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.gt(InternalSqlScriptUtils." 
+ - operation.name().toLowerCase(Locale.ROOT) + "(params.a0),params.v0))", + operation.name().toLowerCase(Locale.ROOT) + "(params.a0),params.v0))", aggFilter.scriptTemplate().toString()); assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=max(int){a->")); assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); @@ -561,6 +563,109 @@ public void testGroupByAndHavingWithFunctionOnTopOfAggregation() { assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); } + public void testTranslateStAsWktForPoints() { + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_AsWKT(point) = 'point (10 20)'"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(" + + "InternalSqlScriptUtils.stAswkt(InternalSqlScriptUtils.geoDocValue(doc,params.v0))," + + "params.v1)" + + ")", + aggFilter.scriptTemplate().toString()); + assertEquals("[{v=point}, {v=point (10 20)}]", aggFilter.scriptTemplate().params().toString()); + } + + public void testTranslateStWktToSql() { + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_WKTToSQL(keyword) = ST_WKTToSQL('point (10 20)')"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils.eq(InternalSqlScriptUtils.stWktToSql(" + + "InternalSqlScriptUtils.docValue(doc,params.v0)),InternalSqlScriptUtils.stWktToSql(params.v1)))", + aggFilter.scriptTemplate().toString()); + assertEquals("[{v=keyword}, {v=point (10.0 20.0)}]", aggFilter.scriptTemplate().params().toString()); + } + + public void testTranslateStDistanceToScript() { + String operator = randomFrom(">", ">="); + String operatorFunction = operator.equalsIgnoreCase(">") ? "gt" : "gte"; + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_Distance(point, ST_WKTToSQL('point (10 20)')) " + operator + " 20"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils." 
+ operatorFunction + "(" + + "InternalSqlScriptUtils.stDistance(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0),InternalSqlScriptUtils.stWktToSql(params.v1)),params.v2))", + sc.script().toString()); + assertEquals("[{v=point}, {v=point (10.0 20.0)}, {v=20}]", sc.script().params().toString()); + } + + public void testTranslateStDistanceToQuery() { + String operator = randomFrom("<", "<="); + LogicalPlan p = plan("SELECT shape FROM test WHERE ST_Distance(point, ST_WKTToSQL('point (10 20)')) " + operator + " 25"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof GeoDistanceQuery); + GeoDistanceQuery gq = (GeoDistanceQuery) translation.query; + assertEquals("point", gq.field()); + assertEquals(20.0, gq.lat(), 0.00001); + assertEquals(10.0, gq.lon(), 0.00001); + assertEquals(25.0, gq.distance(), 0.00001); + } + + public void testTranslateStXY() { + String dim = randomFrom("X", "Y"); + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_" + dim + "(point) = 10"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertThat(translation.query, instanceOf(ScriptQuery.class)); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(InternalSqlScriptUtils.st" + dim + "(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0)),params.v1))", + sc.script().toString()); + assertEquals("[{v=point}, {v=10}]", sc.script().params().toString()); + } + + public void testTranslateStGeometryType() { + LogicalPlan p = plan("SELECT ST_AsWKT(point) FROM test WHERE ST_GEOMETRYTYPE(point) = 'POINT'"); + assertThat(p, instanceOf(Project.class)); + assertThat(p.children().get(0), instanceOf(Filter.class)); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertThat(translation.query, instanceOf(ScriptQuery.class)); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.eq(InternalSqlScriptUtils.stGeometryType(" + + "InternalSqlScriptUtils.geoDocValue(doc,params.v0)),params.v1))", + sc.script().toString()); + assertEquals("[{v=point}, {v=POINT}]", sc.script().params().toString()); + } + public void testTranslateCoalesce_GroupBy_Painless() { LogicalPlan p = plan("SELECT COALESCE(int, 10) FROM test GROUP BY 1"); assertTrue(p instanceof Aggregate); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 65b491fe71a1d..997de6e2f5c53 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -170,8 +170,11 @@ public void testNestedDoc() { public void 
testGeoField() { Map mapping = loadMapping("mapping-geo.json"); - EsField dt = mapping.get("location"); - assertThat(dt.getDataType().typeName, is("unsupported")); + assertThat(mapping.size(), is(2)); + EsField gp = mapping.get("location"); + assertThat(gp.getDataType().typeName, is("geo_point")); + EsField gs = mapping.get("site"); + assertThat(gs.getDataType().typeName, is("geo_shape")); } public void testIpField() { diff --git a/x-pack/plugin/sql/src/test/resources/mapping-geo.json b/x-pack/plugin/sql/src/test/resources/mapping-geo.json index 3c958ff37edfc..e6e499ef82e83 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-geo.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-geo.json @@ -2,6 +2,9 @@ "properties" : { "location" : { "type" : "geo_point" + }, + "site": { + "type" : "geo_shape" } } } diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json index d93633f7aced0..c75ecfdc845c0 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json @@ -44,6 +44,8 @@ } } }, - "foo_type" : { "type" : "foo" } + "foo_type" : { "type" : "foo" }, + "point": {"type" : "geo_point"}, + "shape": {"type" : "geo_shape"} } } diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json index 448c50e6a9f0a..e46d64a45e88f 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json @@ -6,6 +6,7 @@ "keyword" : { "type" : "keyword" }, "unsupported" : { "type" : "ip_range" }, "date" : { "type" : "date"}, + "shape": { "type" : "geo_shape" }, "some" : { "properties" : { "dotted" : { From 64e3f647b829ef17dd784a1b0787bb6e37512a3b Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 14 May 2019 10:34:20 +0100 Subject: [PATCH 091/321] Log cluster UUID when committed (#42065) Today we do not expose the cluster UUID in any logs by default, but it would be useful to see it. For instance if a user starts multiple nodes as separate clusters then they will silently remain as separate clusters even if they are subsequently reconfigured to look like a single cluster. This change logs the committed cluster UUID the first time the node encounters it. 
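To make the "log the UUID once, when it first becomes committed" pattern concrete, a minimal standalone sketch follows. It is illustrative only and is not part of the patch: the class, method, and logger names are invented for the example, and the boolean parameter stands in for the real MetaData#clusterUUIDCommitted() flag; only the log-message shape mirrors the actual change in the diff below.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Logger;

// Hypothetical helper, not part of this patch: logs a cluster UUID exactly
// once, the first time it is observed in a committed cluster state.
class ClusterUuidLogger {
    private static final Logger logger = Logger.getLogger("coordination");
    private final AtomicBoolean alreadyLogged = new AtomicBoolean();

    // uuidCommitted stands in for MetaData#clusterUUIDCommitted() in the real code.
    void onClusterState(String clusterUUID, boolean uuidCommitted) {
        if (uuidCommitted && alreadyLogged.compareAndSet(false, true)) {
            logger.info("cluster UUID set to [" + clusterUUID + "]");
        }
    }
}

The two call sites in the diff below correspond to the two ways a node can first encounter a committed UUID: CoordinationState logs it at the moment it marks the last accepted state as committed, and Coordinator#doStart logs it when a restarted node loads a state whose UUID is already committed. An operator can then compare the logged UUID across nodes to confirm whether they really formed a single cluster.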
--- .../cluster/coordination/CoordinationState.java | 1 + .../org/elasticsearch/cluster/coordination/Coordinator.java | 6 +++++- server/src/main/java/org/elasticsearch/node/Node.java | 6 ++++-- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index a38a383b269d5..ac75c83c19a26 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -480,6 +480,7 @@ default void markLastAcceptedStateAsCommitted() { metaDataBuilder = MetaData.builder(lastAcceptedState.metaData()); } metaDataBuilder.clusterUUIDCommitted(true); + logger.info("cluster UUID set to [{}]", lastAcceptedState.metaData().clusterUUID()); } if (metaDataBuilder != null) { setLastAcceptedState(ClusterState.builder(lastAcceptedState).metaData(metaDataBuilder).build()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 457cfcb15486e..6304588e3121a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -647,7 +647,11 @@ protected void doStart() { coordinationState.set(new CoordinationState(settings, getLocalNode(), persistedState)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); - VotingConfiguration votingConfiguration = coordinationState.get().getLastAcceptedState().getLastCommittedConfiguration(); + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); + if (lastAcceptedState.metaData().clusterUUIDCommitted()) { + logger.info("cluster UUID [{}]", lastAcceptedState.metaData().clusterUUID()); + } + final VotingConfiguration votingConfiguration = lastAcceptedState.getLastCommittedConfiguration(); if (singleNodeDiscovery && votingConfiguration.isEmpty() == false && votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) { diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 4b77c06447836..f0ec86f8cb84c 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.InternalClusterInfoService; @@ -271,8 +272,9 @@ protected Node( nodeEnvironment = new NodeEnvironment(tmpSettings, environment); resourcesToClose.add(nodeEnvironment); - logger.info("node name [{}], node ID [{}]", - NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId()); + logger.info("node name [{}], node ID [{}], cluster name [{}]", + NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId(), + ClusterName.CLUSTER_NAME_SETTING.get(tmpSettings).value()); final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( From 750eb6e88ce2715eaee0281fd97fdbc7c5fc8de6 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 14 May 2019 21:39:46 +1000 Subject: [PATCH 
092/321] Fix test for JDBC version check (#42139) The testExceptionThrownOnIncompatibleVersions test simply requires that the randomly selected version is not equal to the current version. The previous implementation of this test would sometimes randomly select CURRENT. --- .../org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java index bb8b06287a11c..29406b6209893 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java @@ -25,7 +25,8 @@ public class VersionParityTests extends WebServerTestCase { public void testExceptionThrownOnIncompatibleVersions() throws IOException, SQLException { - Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getPreviousVersion()); + logger.info("Checking exception is thrown for version {}", version); prepareRequest(version); String url = JdbcConfiguration.URL_PREFIX + webServer().getHostName() + ":" + webServer().getPort(); From 77d0c79e33cc2e256f3f2d3aff1158f153284254 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 14 May 2019 07:45:14 -0400 Subject: [PATCH 093/321] Adjust load and timeout in testShrinkIndexPrimaryTerm (#42098) This test can create and shuffle 2*(3*5*7) = 210 shards, which is quite heavy for our CI. This commit reduces the load so that we don't time out on CI. Closes #28153 --- .../action/admin/indices/create/ShrinkIndexIT.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index feeb9646e40bf..b14bdd0ed9883 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -65,7 +65,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; -import java.util.List; import java.util.Map; import java.util.stream.IntStream; @@ -165,11 +164,8 @@ public void testCreateShrinkIndexToN() { } public void testShrinkIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(2, 3, 5, 7); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size() - 1), factors); - final int numberOfShards = numberOfShardsFactors.stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = randomSubsetOf(randomInt(numberOfShardsFactors.size() - 1), numberOfShardsFactors) - .stream().reduce(1, (x, y) -> x * y); + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); @@ -218,7 +214,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { final Settings.Builder prepareShrinkSettings = 
Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true); client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards final IndexMetaData indexMetaData = indexMetaData(client(), "source"); final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetaData::primaryTerm).max().getAsLong(); @@ -228,7 +224,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); final IndexMetaData afterShrinkIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { From 32553ef77f13c651e3218cee583f3ce4637b59bb Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 14 May 2019 09:09:07 -0400 Subject: [PATCH 094/321] Remove TestXPackTransportClient (#42103) In preparation for removing the transport client for 8.0, this commit begins removing transport client testing infrastructure. The tests removed here were all testing transport client specific behavior. For the rest, there already exist http specific test methods. --- .../xpack/core/TestXPackTransportClient.java | 54 --- .../MachineLearningLicensingTests.java | 444 ++++++++---------- .../elasticsearch/license/LicensingTests.java | 63 +-- .../xpack/security/authc/RunAsIntegTests.java | 119 ----- .../authc/pki/PkiAuthenticationTests.java | 56 --- .../netty4/SslHostnameVerificationTests.java | 125 ----- .../transport/ssl/SslIntegrationTests.java | 62 +-- .../transport/ssl/SslMultiPortTests.java | 436 ----------------- .../xpack/ssl/SSLClientAuthTests.java | 41 -- 9 files changed, 189 insertions(+), 1211 deletions(-) delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java deleted file mode 100644 index 30c370c14c27d..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core; - -import io.netty.util.ThreadDeathWatcher; -import io.netty.util.concurrent.GlobalEventExecutor; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.util.Arrays; -import java.util.Collection; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.test.ESTestCase.getTestTransportPlugin; - -/** - * TransportClient.Builder that installs the XPackPlugin by default. - */ -@SuppressWarnings({"unchecked","varargs"}) -public class TestXPackTransportClient extends TransportClient { - - @SafeVarargs - public TestXPackTransportClient(Settings settings, Class... plugins) { - this(settings, Arrays.asList(plugins)); - } - - public TestXPackTransportClient(Settings settings, Collection> plugins) { - super(settings, Settings.EMPTY, addPlugins(plugins, getTestTransportPlugin()), null); - } - - @Override - public void close() { - super.close(); - if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false - || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(SecurityField.NAME4)) { - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java index 287bd22f91f92..9e0692672349d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/license/MachineLearningLicensingTests.java @@ -8,17 +8,13 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; @@ -34,7 +30,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; @@ -61,63 +56,50 @@ public void testMachineLearningPutJobActionRestricted() { License.OperationMode mode = randomInvalidLicenseType(); enableLicensing(mode); 
assertMLAllowed(false); + // test that license restricted apis do not work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture. newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), listener); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), listener); listener.actionGet(); - fail("put job action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), listener); - PutJobAction.Response response = listener.actionGet(); - assertNotNull(response); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), listener); + PutJobAction.Response response = listener.actionGet(); + assertNotNull(response); } public void testMachineLearningOpenJobActionRestricted() throws Exception { String jobId = "testmachinelearningopenjobactionrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response response = putJobListener.actionGet(); - assertNotNull(response); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response response = putJobListener.actionGet(); + assertNotNull(response); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); enableLicensing(mode); assertMLAllowed(false); // test that license restricted apis do not work - try (TransportClient client = new 
TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), listener); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), listener); listener.actionGet(); - fail("open job action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); @@ -131,13 +113,10 @@ public void testMachineLearningOpenJobActionRestricted() throws Exception { }); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), listener); - AcknowledgedResponse response = listener.actionGet(); - assertNotNull(response); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), listener); + AcknowledgedResponse response2 = listener.actionGet(); + assertNotNull(response2); } public void testMachineLearningPutDatafeedActionRestricted() throws Exception { @@ -145,46 +124,36 @@ public void testMachineLearningPutDatafeedActionRestricted() throws Exception { String datafeedId = jobId + "-datafeed"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); enableLicensing(mode); assertMLAllowed(false); // test that license restricted apis do not work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - 
client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); listener.actionGet(); - fail("put datafeed action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow machine learning mode = randomValidLicenseType(); enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); - PutDatafeedAction.Response response = listener.actionGet(); - assertNotNull(response); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, Collections.singletonList(jobId))), listener); + PutDatafeedAction.Response response = listener.actionGet(); + assertNotNull(response); } public void testAutoCloseJobWithDatafeed() throws Exception { @@ -194,31 +163,29 @@ public void testAutoCloseJobWithDatafeed() throws Exception { String datafeedIndex = jobId + "-data"; prepareCreate(datafeedIndex).addMapping("type", "{\"type\":{\"properties\":{\"time\":{\"type\":\"date\"}}}}", XContentType.JSON).get(); - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - // put job - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - // put datafeed - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(datafeedIndex))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - // open job - PlainActionFuture 
openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - // start datafeed - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); - listener.actionGet(); - } + + // put job + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + // put datafeed + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(datafeedIndex))), putDatafeedListener); + PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); + // open job + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); + // start datafeed + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); + listener.actionGet(); + if (randomBoolean()) { enableLicensing(randomInvalidLicenseType()); @@ -245,18 +212,15 @@ public void testAutoCloseJobWithDatafeed() throws Exception { enableLicensing(randomValidLicenseType()); assertMLAllowed(true); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - // open job - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - // start datafeed - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); - listener.actionGet(); - } + // open job + PlainActionFuture openJobListener2 = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener2); + AcknowledgedResponse openJobResponse3 = openJobListener2.actionGet(); + assertNotNull(openJobResponse3); + // start datafeed + PlainActionFuture listener2 = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener2); + listener2.actionGet(); assertBusy(() -> { JobState jobState = getJobStats(jobId).getState(); @@ -299,24 +263,20 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception prepareCreate(datafeedIndex).addMapping("type", "{\"type\":{\"properties\":{\"time\":{\"type\":\"date\"}}}}", XContentType.JSON).get(); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try 
(TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(datafeedIndex))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(datafeedIndex))), putDatafeedListener); + PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); @@ -333,36 +293,30 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception }); // test that license restricted apis do not work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); listener.actionGet(); - fail("start datafeed action should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); - } + }); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.MACHINE_LEARNING)); // Pick a license that does allow 
machine learning mode = randomValidLicenseType(); enableLicensing(mode); assertMLAllowed(true); // test that license restricted apis do now work - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - // re-open job now that the license is valid again - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); - AcknowledgedResponse response = listener.actionGet(); - assertNotNull(response); - } + // re-open job now that the license is valid again + PlainActionFuture openJobListener2 = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener2); + AcknowledgedResponse openJobResponse3 = openJobListener2.actionGet(); + assertNotNull(openJobResponse3); + + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed(new StartDatafeedAction.Request(datafeedId, 0L), listener); + AcknowledgedResponse response = listener.actionGet(); + assertNotNull(response); } public void testMachineLearningStopDatafeedActionNotRestricted() throws Exception { @@ -373,29 +327,25 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio prepareCreate(datafeedIndex).addMapping("type", "{\"type\":{\"properties\":{\"time\":{\"type\":\"date\"}}}}", XContentType.JSON).get(); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(datafeedIndex))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - PlainActionFuture startDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).startDatafeed( - new StartDatafeedAction.Request(datafeedId, 0L), startDatafeedListener); - AcknowledgedResponse startDatafeedResponse = startDatafeedListener.actionGet(); - assertNotNull(startDatafeedResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new 
MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(datafeedIndex))), putDatafeedListener); + PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); + PlainActionFuture startDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).startDatafeed( + new StartDatafeedAction.Request(datafeedId, 0L), startDatafeedListener); + AcknowledgedResponse startDatafeedResponse = startDatafeedListener.actionGet(); + assertNotNull(startDatafeedResponse); boolean invalidLicense = randomBoolean(); if (invalidLicense) { @@ -404,30 +354,27 @@ public void testMachineLearningStopDatafeedActionNotRestricted() throws Exceptio enableLicensing(randomValidLicenseType()); } - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).stopDatafeed(new StopDatafeedAction.Request(datafeedId), listener); - if (invalidLicense) { - // the stop datafeed due to invalid license happens async, so check if the datafeed turns into stopped state: - assertBusy(() -> { - GetDatafeedsStatsAction.Response response = - new MachineLearningClient(client) - .getDatafeedsStats(new GetDatafeedsStatsAction.Request(datafeedId)).actionGet(); - assertEquals(DatafeedState.STOPPED, response.getResponse().results().get(0).getDatafeedState()); - }); - } else { - listener.actionGet(); - } - - if (invalidLicense) { - // the close due to invalid license happens async, so check if the job turns into closed state: - assertBusy(() -> { - GetJobsStatsAction.Response response = - new MachineLearningClient(client).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); - assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); - }); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).stopDatafeed(new StopDatafeedAction.Request(datafeedId), listener); + if (invalidLicense) { + // the stop datafeed due to invalid license happens async, so check if the datafeed turns into stopped state: + assertBusy(() -> { + GetDatafeedsStatsAction.Response response = + new MachineLearningClient(client()) + .getDatafeedsStats(new GetDatafeedsStatsAction.Request(datafeedId)).actionGet(); + assertEquals(DatafeedState.STOPPED, response.getResponse().results().get(0).getDatafeedState()); + }); + } else { + listener.actionGet(); + } + + if (invalidLicense) { + // the close due to invalid license happens async, so check if the job turns into closed state: + assertBusy(() -> { + GetJobsStatsAction.Response response = + new MachineLearningClient(client()).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); 
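/*
 * Note on the idiom used here: stop and close actions are deliberately not
 * license-restricted (see testMachineLearningStopDatafeedActionNotRestricted and
 * testMachineLearningCloseJobActionNotRestricted), but when the license turns invalid the
 * node stops datafeeds and closes jobs asynchronously, so the assertions must poll.
 * ESTestCase.assertBusy retries the block with growing backoff (10-second timeout by
 * default) and surfaces the last failure if the state never converges, e.g. for the
 * datafeed:
 *
 *     assertBusy(() -> {
 *         GetDatafeedsStatsAction.Response stats = new MachineLearningClient(client())
 *             .getDatafeedsStats(new GetDatafeedsStatsAction.Request(datafeedId)).actionGet();
 *         assertEquals(DatafeedState.STOPPED, stats.getResponse().results().get(0).getDatafeedState());
 *     });
 */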
+ assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); + }); } } @@ -435,18 +382,14 @@ public void testMachineLearningCloseJobActionNotRestricted() throws Exception { String jobId = "testmachinelearningclosejobactionnotrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture openJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).openJob(new OpenJobAction.Request(jobId), openJobListener); - AcknowledgedResponse openJobResponse = openJobListener.actionGet(); - assertNotNull(openJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture openJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).openJob(new OpenJobAction.Request(jobId), openJobListener); + AcknowledgedResponse openJobResponse = openJobListener.actionGet(); + assertNotNull(openJobResponse); boolean invalidLicense = randomBoolean(); if (invalidLicense) { @@ -455,22 +398,19 @@ public void testMachineLearningCloseJobActionNotRestricted() throws Exception { enableLicensing(randomValidLicenseType()); } - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - CloseJobAction.Request request = new CloseJobAction.Request(jobId); - request.setCloseTimeout(TimeValue.timeValueSeconds(20)); - if (invalidLicense) { - // the close due to invalid license happens async, so check if the job turns into closed state: - assertBusy(() -> { - GetJobsStatsAction.Response response = - new MachineLearningClient(client).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); - assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); - }); - } else { - new MachineLearningClient(client).closeJob(request, listener); - listener.actionGet(); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + CloseJobAction.Request request = new CloseJobAction.Request(jobId); + request.setCloseTimeout(TimeValue.timeValueSeconds(20)); + if (invalidLicense) { + // the close due to invalid license happens async, so check if the job turns into closed state: + assertBusy(() -> { + GetJobsStatsAction.Response response = + new MachineLearningClient(client()).getJobsStats(new GetJobsStatsAction.Request(jobId)).actionGet(); + assertEquals(JobState.CLOSED, response.getResponse().results().get(0).getState()); + }); + } else { + new MachineLearningClient(client()).closeJob(request, listener); + listener.actionGet(); } } @@ -478,25 +418,18 @@ public void 
testMachineLearningDeleteJobActionNotRestricted() throws Exception { String jobId = "testmachinelearningclosejobactionnotrestricted"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); // Pick a random license License.OperationMode mode = randomLicenseType(); enableLicensing(mode); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).deleteJob(new DeleteJobAction.Request(jobId), listener); - listener.actionGet(); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).deleteJob(new DeleteJobAction.Request(jobId), listener); + listener.actionGet(); } public void testMachineLearningDeleteDatafeedActionNotRestricted() throws Exception { @@ -504,31 +437,24 @@ public void testMachineLearningDeleteDatafeedActionNotRestricted() throws Except String datafeedId = jobId + "-datafeed"; assertMLAllowed(true); // test that license restricted apis do now work - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture putJobListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); - PutJobAction.Response putJobResponse = putJobListener.actionGet(); - assertNotNull(putJobResponse); - PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).putDatafeed( - new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, - Collections.singletonList(jobId))), putDatafeedListener); - PutDatafeedAction.Response putDatafeedResponse = putDatafeedListener.actionGet(); - assertNotNull(putDatafeedResponse); - } + PlainActionFuture putJobListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putJob(new PutJobAction.Request(createJob(jobId)), putJobListener); + PutJobAction.Response putJobResponse = putJobListener.actionGet(); + assertNotNull(putJobResponse); + PlainActionFuture putDatafeedListener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).putDatafeed( + new PutDatafeedAction.Request(createDatafeed(datafeedId, jobId, + Collections.singletonList(jobId))), putDatafeedListener); + PutDatafeedAction.Response 
putDatafeedResponse = putDatafeedListener.actionGet(); + assertNotNull(putDatafeedResponse); // Pick a random license License.OperationMode mode = randomLicenseType(); enableLicensing(mode); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateMachineLearning.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PlainActionFuture listener = PlainActionFuture.newFuture(); - new MachineLearningClient(client).deleteDatafeed(new DeleteDatafeedAction.Request(datafeedId), listener); - listener.actionGet(); - } + PlainActionFuture listener = PlainActionFuture.newFuture(); + new MachineLearningClient(client()).deleteDatafeed(new DeleteDatafeedAction.Request(datafeedId), listener); + listener.actionGet(); } private static OperationMode randomInvalidLicenseType() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index 02b9cf61e4a39..c115aac11d732 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -19,11 +19,8 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.node.MockNode; @@ -36,14 +33,8 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.junit.After; import org.junit.Before; @@ -60,7 +51,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; @@ -156,7 +146,7 @@ public void testEnableDisableBehaviour() throws Exception { assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); refresh(); - final Client client = internalCluster().transportClient(); + final Client client = internalCluster().client(); disableLicensing(); @@ -216,57 +206,6 @@ public void testRestAuthenticationByLicenseType() throws 
Exception { assertThat(authorizedAuthenticateResponse.getStatusLine().getStatusCode(), is(200)); } - public void testSecurityActionsByLicenseType() throws Exception { - // security actions should not work! - Settings settings = internalCluster().transportClient().settings(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - new SecurityClient(client).preparePutUser("john", "password".toCharArray(), Hasher.BCRYPT).get(); - fail("security actions should not be enabled!"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.status(), is(RestStatus.FORBIDDEN)); - assertThat(e.getMessage(), containsString("non-compliant")); - } - - // enable a license that enables security - License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, - License.OperationMode.PLATINUM, License.OperationMode.STANDARD, OperationMode.BASIC); - enableLicensing(mode); - // security actions should work! - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - PutUserResponse response = new SecurityClient(client).preparePutUser("john", "password".toCharArray(), Hasher.BCRYPT).get(); - assertNotNull(response); - } - } - - public void testTransportClientAuthenticationByLicenseType() throws Exception { - Settings.Builder builder = Settings.builder() - .put(internalCluster().transportClient().settings()); - // remove user info - builder.remove(SecurityField.USER_SETTING.getKey()); - builder.remove(ThreadContext.PREFIX + "." 
+ UsernamePasswordToken.BASIC_AUTH_HEADER); - - // basic has no auth - try (TransportClient client = new TestXPackTransportClient(builder.build(), LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - assertGreenClusterState(client); - } - - // enable a license that enables security - License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, - License.OperationMode.PLATINUM, License.OperationMode.STANDARD); - enableLicensing(mode); - - try (TransportClient client = new TestXPackTransportClient(builder.build(), LocalStateSecurity.class)) { - client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); - client.admin().cluster().prepareHealth().get(); - fail("should not have been able to connect to a node!"); - } catch (NoNodeAvailableException e) { - // expected - } - } - public void testNodeJoinWithoutSecurityExplicitlyEnabled() throws Exception { License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.PLATINUM, License.OperationMode.STANDARD); enableLicensing(mode); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java index 6d5c6770bf2f5..2ce089f385896 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java @@ -5,35 +5,16 @@ */ package org.elasticsearch.xpack.security.authc; -import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; -import org.elasticsearch.xpack.security.LocalStateSecurity; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.BeforeClass; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; public class RunAsIntegTests extends SecurityIntegTestCase { @@ -86,43 +67,6 @@ protected boolean transportSSLEnabled() { return false; } - public void testUserImpersonation() throws Exception { - try (TransportClient client = getTransportClient(Settings.builder() - 
.put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + - SecuritySettingsSourceField.TEST_PASSWORD).build())) { - //ensure the client can connect - assertBusy(() -> assertThat(client.connectedNodes().size(), greaterThan(0))); - - // make sure the client can't get health - try { - client.admin().cluster().prepareHealth().get(); - fail("the client user should not have privileges to get the health"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unauthorized")); - } - - // let's run as without authorization - try { - Map headers = Collections.singletonMap(AuthenticationServiceField.RUN_AS_USER_HEADER, - SecuritySettingsSource.TEST_USER_NAME); - client.filterWithHeader(headers) - .admin().cluster().prepareHealth().get(); - fail("run as should be unauthorized for the transport client user"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unauthorized")); - assertThat(e.getMessage(), containsString("run as")); - } - - Map headers = new HashMap<>(); - headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, - new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); - headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME); - // lets set the user - ClusterHealthResponse response = client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); - assertThat(response.isTimedOut(), is(false)); - } - } - public void testUserImpersonationUsingHttp() throws Exception { // use the transport client user and try to run as try { @@ -156,29 +100,6 @@ public void testUserImpersonationUsingHttp() throws Exception { getRestClient().performRequest(requestForUserRunAsUser(SecuritySettingsSource.TEST_USER_NAME)); } - public void testEmptyUserImpersonationHeader() throws Exception { - try (TransportClient client = getTransportClient(Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" - + SecuritySettingsSourceField.TEST_PASSWORD).build())) { - //ensure the client can connect - awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }); - - try { - Map headers = new HashMap<>(); - headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, - new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); - headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, ""); - - client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); - fail("run as header should not be allowed to be empty"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unable to authenticate")); - } - } - } - public void testEmptyHeaderUsingHttp() throws Exception { try { getRestClient().performRequest(requestForUserRunAsUser("")); @@ -188,29 +109,6 @@ public void testEmptyHeaderUsingHttp() throws Exception { } } - public void testNonExistentRunAsUser() throws Exception { - try (TransportClient client = getTransportClient(Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + - SecuritySettingsSourceField.TEST_PASSWORD).build())) { - //ensure the client can connect - awaitBusy(() -> { - return client.connectedNodes().size() > 0; - }); - - try { - Map headers = new HashMap<>(); - headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, - new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); - 
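/*
 * The run-as handshake that these deleted TransportClient tests exercised survives in the
 * HTTP variants: the caller authenticates as RUN_AS_USER via Basic auth and names the
 * user to impersonate in the "es-security-runas-user" header
 * (AuthenticationServiceField.RUN_AS_USER_HEADER). A minimal REST sketch, mirroring
 * requestForUserRunAsUser(...) below; the "/_nodes" path is illustrative:
 *
 *     Request request = new Request("GET", "/_nodes");
 *     RequestOptions.Builder options = request.getOptions().toBuilder();
 *     options.addHeader("Authorization",
 *         UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, TEST_PASSWORD_SECURE_STRING));
 *     options.addHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME);
 *     request.setOptions(options);
 *     getRestClient().performRequest(request);
 */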
headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, "idontexist"); - - client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); - fail("run as header should not accept non-existent users"); - } catch (ElasticsearchSecurityException e) { - assertThat(e.getMessage(), containsString("unauthorized")); - } - } - } - public void testNonExistentRunAsUserUsingHttp() throws Exception { try { getRestClient().performRequest(requestForUserRunAsUser("idontexist")); @@ -228,21 +126,4 @@ private static Request requestForUserRunAsUser(String user) { request.setOptions(options); return request; } - - // build our own here to better mimic an actual client... - TransportClient getTransportClient(Settings extraSettings) { - NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); - List nodes = nodeInfos.getNodes(); - assertTrue(nodes.isEmpty() == false); - TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); - String clusterName = nodeInfos.getClusterName().value(); - - Settings settings = Settings.builder() - .put(extraSettings) - .put("cluster.name", clusterName) - .build(); - - return new TestXPackTransportClient(settings, LocalStateSecurity.class) - .addTransportAddress(publishAddress); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java index 1ef36f4fdbdf7..c3698f32b6e32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java @@ -10,28 +10,19 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.util.EntityUtils; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.SecuritySingleNodeTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.common.socket.SocketAccess; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; -import org.elasticsearch.xpack.security.LocalStateSecurity; import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManager; - import java.net.InetSocketAddress; import java.security.SecureRandom; import java.util.Arrays; @@ -41,7 +32,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -85,36 +75,6 @@ protected boolean enableWarningsCheck() { return false; } - public void 
testTransportClientCanAuthenticateViaPki() { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", - "testnode", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); - try (TransportClient client = createTransportClient(builder.build())) { - client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); - IndexResponse response = client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); - } - } - - /** - * Test uses the testclient cert which is trusted by the SSL layer BUT it is not trusted by the PKI authentication - * realm - */ - public void testTransportClientAuthenticationFailure() { - try (TransportClient client = createTransportClient(Settings.EMPTY)) { - client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); - client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); - fail("transport client should not have been able to authenticate"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - public void testRestAuthenticationViaPki() throws Exception { SSLContext context = getRestSSLContext("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", "testnode", @@ -157,22 +117,6 @@ private SSLContext getRestSSLContext(String keyPath, String password, String cer return context; } - private TransportClient createTransportClient(Settings additionalSettings) { - Settings clientSettings = transportClientSettings(); - if (additionalSettings.getByPrefix("xpack.security.transport.ssl.").isEmpty() == false) { - clientSettings = clientSettings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false); - } - - Settings.Builder builder = Settings.builder() - .put("xpack.security.transport.ssl.enabled", true) - .put(clientSettings, false) - .put(additionalSettings) - .put("cluster.name", node().settings().get("cluster.name")); - builder.remove(SecurityField.USER_SETTING.getKey()); - builder.remove("request.headers.Authorization"); - return new TestXPackTransportClient(builder.build(), LocalStateSecurity.class); - } - private String getNodeUrl() { TransportAddress transportAddress = randomFrom(node().injector().getInstance(HttpServerTransport.class) .boundAddress().boundAddresses()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java deleted file mode 100644 index 30208a1158075..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.transport.netty4; - -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; -import org.elasticsearch.xpack.security.LocalStateSecurity; - -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.containsString; - -public class SslHostnameVerificationTests extends SecurityIntegTestCase { - - @Override - protected boolean transportSSLEnabled() { - return true; - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Settings settings = super.nodeSettings(nodeOrdinal); - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(settings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false), false); - Path keyPath; - Path certPath; - Path nodeCertPath; - try { - /* - * This keystore uses a cert without any subject alternative names and a CN of "Elasticsearch Test Node No SAN" - * that will not resolve to a DNS name and will always cause hostname verification failures - */ - keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.pem"); - certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.crt"); - nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); - assert keyPath != null; - assert certPath != null; - assert nodeCertPath != null; - assertThat(Files.exists(certPath), is(true)); - assertThat(Files.exists(nodeCertPath), is(true)); - assertThat(Files.exists(keyPath), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - - SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> { - secureSettings.setString("xpack.security.transport.ssl.secure_key_passphrase", "testnode-no-subjaltname"); - }); - return settingsBuilder.put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath()) - .putList("xpack.security.transport.ssl.certificate_authorities", - Arrays.asList(certPath.toString(), nodeCertPath.toString())) - // disable hostname verification as this test uses certs without a valid SAN or DNS in the CN - .put("xpack.security.transport.ssl.verification_mode", "certificate") - .build(); - } - - @Override - protected Settings transportClientSettings() { - Path keyPath; - Path certPath; - Path nodeCertPath; - try { - keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.pem"); - certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.crt"); - nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); - assert keyPath != null; - assert certPath != null; - assert nodeCertPath != null; - assertThat(Files.exists(certPath), is(true)); - assertThat(Files.exists(nodeCertPath), is(true)); - 
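/*
 * Context for this deleted class: it existed to prove that
 * xpack.security.transport.ssl.verification_mode governs hostname checks on the client
 * side. "certificate" validates the certificate chain only; "full" additionally requires
 * the server hostname to match a SAN (or the CN), which the testnode-no-subjaltname
 * certificate cannot satisfy. Sketched with the real setting key (values as used by this
 * test):
 *
 *     Settings lenient = Settings.builder()
 *         .put("xpack.security.transport.ssl.verification_mode", "certificate") // chain only
 *         .build();
 *     Settings strict = Settings.builder()
 *         .put("xpack.security.transport.ssl.verification_mode", "full")        // chain + hostname
 *         .build();
 *
 * Under "full" the handshake is rejected and the TransportClient surfaces it as a
 * NoNodeAvailableException, which is what testThatHostnameMismatchDeniesTransportClientConnection
 * asserted.
 */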
assertThat(Files.exists(keyPath), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - Settings settings = super.transportClientSettings(); - // remove all ssl settings - Settings.Builder builder = Settings.builder(); - builder.put(settings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false), false); - - builder.put("xpack.security.transport.ssl.verification_mode", "certificate") - .put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath()) - .put("xpack.security.transport.ssl.key_passphrase", "testnode-no-subjaltname") - .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath()) - .putList("xpack.security.transport.ssl.certificate_authorities", Arrays.asList(certPath.toString(), nodeCertPath.toString())); - return builder.build(); - } - - public void testThatHostnameMismatchDeniesTransportClientConnection() throws Exception { - Transport transport = internalCluster().getDataNodeInstance(Transport.class); - TransportAddress transportAddress = transport.boundAddress().publishAddress(); - InetSocketAddress inetSocketAddress = transportAddress.address(); - - Settings settings = Settings.builder().put(transportClientSettings()) - .put("xpack.security.transport.ssl.verification_mode", "full") - .build(); - - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - client.addTransportAddress(new TransportAddress(inetSocketAddress.getAddress(), inetSocketAddress.getPort())); - client.admin().cluster().prepareHealth().get(); - fail("Expected a NoNodeAvailableException due to hostname verification failures"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - - public void testTransportClientConnectionIgnoringHostnameVerification() throws Exception { - Client client = internalCluster().transportClient(); - assertGreenClusterState(client); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java index 5f25213beefa1..8488fe2a5e638 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java @@ -15,38 +15,26 @@ import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; -import 
org.elasticsearch.xpack.security.LocalStateSecurity; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.TrustManagerFactory; import java.io.InputStreamReader; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.security.KeyStore; import java.security.SecureRandom; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; -import java.util.List; import java.util.Locale; -import java.util.Set; - -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLHandshakeException; -import javax.net.ssl.TrustManagerFactory; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; @@ -72,50 +60,6 @@ protected boolean transportSSLEnabled() { return true; } - // no SSL exception as this is the exception is returned when connecting - public void testThatUnconfiguredCiphersAreRejected() throws Exception { - Set supportedCiphers = Sets.newHashSet(SSLContext.getDefault().getSupportedSSLParameters().getCipherSuites()); - Set defaultXPackCiphers = Sets.newHashSet(XPackSettings.DEFAULT_CIPHERS); - final List unconfiguredCiphers = new ArrayList<>(Sets.difference(supportedCiphers, defaultXPackCiphers)); - Collections.shuffle(unconfiguredCiphers, random()); - assumeFalse("the unconfigured ciphers list is empty", unconfiguredCiphers.isEmpty()); - - try (TransportClient transportClient = new TestXPackTransportClient(Settings.builder() - .put(transportClientSettings()) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .putList("xpack.security.transport.ssl.cipher_suites", unconfiguredCiphers) - .build(), LocalStateSecurity.class)) { - - TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); - transportClient.addTransportAddress(transportAddress); - - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - - public void testThatTransportClientUsingSSLv3ProtocolIsRejected() { - assumeFalse("Can't run in a FIPS JVM as SSLv3 SSLContext not available", inFipsJvm()); - try (TransportClient transportClient = new TestXPackTransportClient(Settings.builder() - .put(transportClientSettings()) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .putList("xpack.security.transport.ssl.supported_protocols", new String[]{"SSLv3"}) - .build(), LocalStateSecurity.class)) { - - TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); - transportClient.addTransportAddress(transportAddress); - - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); - } - } - public void testThatConnectionToHTTPWorks() throws Exception { Settings.Builder builder = Settings.builder(); addSSLSettingsForPEMFiles( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java deleted file mode 100644 index d07bff822a2f8..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.transport.ssl; - -import org.elasticsearch.bootstrap.JavaVersion; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.ssl.SSLClientAuth; -import org.elasticsearch.xpack.security.LocalStateSecurity; -import org.junit.BeforeClass; - -import java.net.InetAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static org.elasticsearch.test.SecuritySettingsSource.TEST_USER_NAME; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; -import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.containsString; - -public class SslMultiPortTests extends SecurityIntegTestCase { - - private static int randomClientPort; - private static int randomNoClientAuthPort; - private static InetAddress localAddress; - - @BeforeClass - public static void getRandomPort() { - randomClientPort = randomIntBetween(49000, 65500); // ephemeral port - randomNoClientAuthPort = randomIntBetween(49000, 65500); - localAddress = InetAddress.getLoopbackAddress(); - } - - /** - * On each node sets up the following profiles: - *
<ul>
- * <li>default: testnode keypair. Requires client auth</li>
- * <li>client: testnode-client-profile profile that only trusts the testclient cert. Requires client auth</li>
- * <li>no_client_auth: testnode keypair. Does not require client auth</li>
- * </ul>
- */ - @Override - protected Settings nodeSettings(int nodeOrdinal) { - String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); - String randomNoClientAuthPortRange = randomNoClientAuthPort + "-" + (randomNoClientAuthPort+100); - - Path trustCert; - try { - trustCert = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt"); - assertThat(Files.exists(trustCert), is(true)); - } catch (Exception e) { - throw new RuntimeException(e); - } - - Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); - addSSLSettingsForNodePEMFiles(builder, "transport.profiles.client.xpack.security.", true); - builder.put("transport.profiles.client.port", randomClientPortRange) - .put("transport.profiles.client.bind_host", NetworkAddress.format(localAddress)) - .put("transport.profiles.client.xpack.security.ssl.certificate_authorities", trustCert.toAbsolutePath()); - addSSLSettingsForNodePEMFiles(builder, "transport.profiles.no_client_auth.xpack.security.", true); - builder.put("transport.profiles.no_client_auth.port", randomNoClientAuthPortRange) - .put("transport.profiles.no_client_auth.bind_host", NetworkAddress.format(localAddress)) - .put("transport.profiles.no_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.NONE); - final Settings settings = builder.build(); - logger.info("node {} settings:\n{}", nodeOrdinal, settings); - return settings; - } - - @Override - protected boolean transportSSLEnabled() { - return true; - } - - private TransportClient createTransportClient(Settings additionalSettings) { - Settings settings = Settings.builder() - .put(transportClientSettings().filter(s -> s.startsWith("xpack.security.transport.ssl") == false)) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put(additionalSettings) - .build(); - //return new TestXPackTransportClient(settings, LocalStateSecurity.class); - logger.info("transport client settings:\n{}", settings); - return new TestXPackTransportClient(settings, LocalStateSecurity.class); - } - - /** - * Uses the internal cluster's transport client to test connection to the default profile. The internal transport - * client uses the same SSL settings as the default profile so a connection should always succeed - */ - public void testThatStandardTransportClientCanConnectToDefaultProfile() throws Exception { - assertGreenClusterState(internalCluster().transportClient()); - } - - /** - * Uses a transport client with the same settings as the internal cluster transport client to test connection to the - * no_client_auth profile. The internal transport client is not used here since we are connecting to a different - * profile. 
Since the no_client_auth profile does not require client authentication, the standard transport client - * connection should always succeed as the settings are the same as the default profile except for the port and - * disabling the client auth requirement - */ - public void testThatStandardTransportClientCanConnectToNoClientAuthProfile() throws Exception { - try(TransportClient transportClient = new TestXPackTransportClient(Settings.builder() - .put(transportClientSettings()) - .put("xpack.security.transport.ssl.enabled", true) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - .put("node.name", "programmatic_transport_client") - .put("cluster.name", internalCluster().getClusterName()) - .build(), LocalStateSecurity.class)) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - } - } - - /** - * Uses a transport client with the same settings as the internal cluster transport client to test connection to the - * client profile. The internal transport client is not used here since we are connecting to a different - * profile. The client profile requires client auth and only trusts the certificate in the testclient-client-profile - * keystore so this connection will fail as the certificate presented by the standard transport client is not trusted - * by this profile - */ - public void testThatStandardTransportClientCannotConnectToClientProfile() throws Exception { - try (TransportClient transportClient = createTransportClient(Settings.EMPTY)) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with a custom key pair; TransportClient only trusts the testnode - * certificate and had its own self signed certificate. This test connects to the client profile, which is only - * set to trust the testclient-client-profile certificate so the connection should always succeed - */ - public void testThatProfileTransportClientCanConnectToClientProfile() throws Exception { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", - "testclient-client-profile", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); - try (TransportClient transportClient = createTransportClient(builder.build())) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - } - } - - /** - * Uses a transport client with a custom key pair; TransportClient only trusts the testnode - * certificate and had its own self signed certificate. 
This test connects to the no_client_auth profile, which - * uses a truststore that does not trust the testclient-client-profile certificate but does not require client - * authentication - */ - public void testThatProfileTransportClientCanConnectToNoClientAuthProfile() throws Exception { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", - "testclient-client-profile", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - List.of("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - builder.putList("xpack.security.transport.ssl.supported_protocols", getProtocols()); - try (TransportClient transportClient = createTransportClient(builder.build())) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - } - } - - /** - * Uses a transport client with a custom key pair; TransportClient only trusts the testnode - * certificate and had its own self signed certificate. This test connects to the default profile, which - * uses a truststore that does not trust the testclient-client-profile certificate and requires client authentication - * so the connection should always fail - */ - public void testThatProfileTransportClientCannotConnectToDefaultProfile() throws Exception { - Settings.Builder builder = Settings.builder(); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", - "testclient-client-profile", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - try (TransportClient transportClient = createTransportClient(builder.build())) { - TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); - transportClient.addTransportAddress(transportAddress); - transportClient.admin().cluster().prepareHealth().get(); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with SSL disabled. This test connects to the default profile, which should always fail - * as a non-ssl transport client cannot connect to a ssl profile - */ - public void testThatTransportClientCannotConnectToDefaultProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with SSL disabled. 
This test connects to the client profile, which should always fail - * as a non-ssl transport client cannot connect to a ssl profile - */ - public void testThatTransportClientCannotConnectToClientProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with SSL disabled. This test connects to the no_client_auth profile, which should always fail - * as a non-ssl transport client cannot connect to a ssl profile - */ - public void testThatTransportClientCannotConnectToNoClientAuthProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client that only trusts the testnode certificate. This test connects to the no_client_auth profile, - * which uses the testnode certificate and does not require to present a certificate, so this connection should always succeed - */ - public void testThatTransportClientWithOnlyTruststoreCanConnectToNoClientAuthProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - } - } - - /** - * Uses a transport client that only trusts the testnode certificate. 
This test connects to the client profile, which uses - * the testnode certificate and requires the client to present a certificate, so this connection will never work as - * the client has no certificate to present - */ - public void testThatTransportClientWithOnlyTruststoreCannotConnectToClientProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client that only trusts the testnode certificate. This test connects to the default profile, which uses - * the testnode certificate and requires the client to present a certificate, so this connection will never work as - * the client has no certificate to present - */ - public void testThatTransportClientWithOnlyTruststoreCannotConnectToDefaultProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public - * certificate authorities. 
This test connects to the default profile, which uses a self-signed certificate that - * will never be trusted by the default truststore so the connection should always fail - */ - public void testThatSSLTransportClientWithNoTruststoreCannotConnectToDefaultProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.enabled", true) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public - * certificate authorities. This test connects to the client profile, which uses a self-signed certificate that - * will never be trusted by the default truststore so the connection should always fail - */ - public void testThatSSLTransportClientWithNoTruststoreCannotConnectToClientProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.enabled", true) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - /** - * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public - * certificate authorities. 
This test connects to the no_client_auth profile, which uses a self-signed certificate that - * will never be trusted by the default truststore so the connection should always fail - */ - public void testThatSSLTransportClientWithNoTruststoreCannotConnectToNoClientAuthProfile() throws Exception { - Settings settings = Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) - .put("cluster.name", internalCluster().getClusterName()) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.enabled", true) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - .build(); - try (TransportClient transportClient = new TestXPackTransportClient(settings, - Collections.singletonList(LocalStateSecurity.class))) { - transportClient.addTransportAddress(new TransportAddress(localAddress, - getProfilePort("no_client_auth"))); - assertGreenClusterState(transportClient); - fail("Expected NoNodeAvailableException"); - } catch (NoNodeAvailableException e) { - assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); - } - } - - private static int getProfilePort(String profile) { - TransportAddress[] transportAddresses = - internalCluster().getInstance(Transport.class).profileBoundAddresses().get(profile).boundAddresses(); - for (TransportAddress address : transportAddresses) { - if (address.address().getAddress().equals(localAddress)) { - return address.address().getPort(); - } - } - throw new IllegalStateException("failed to find transport address equal to [" + NetworkAddress.format(localAddress) + "] " + - " in the following bound addresses " + Arrays.toString(transportAddresses)); - } - - /** - * TLSv1.3 when running in a JDK prior to 11.0.3 has a race condition when multiple simultaneous connections are established. See - * JDK-8213202. This issue is not triggered when using client authentication, which we do by default for transport connections. - * However if client authentication is turned off and TLSv1.3 is used on the affected JVMs then we will hit this issue. 
- */ - private static List getProtocols() { - JavaVersion full = - AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); - if (full.compareTo(JavaVersion.parse("11.0.3")) < 0) { - return List.of("TLSv1.2"); - } - return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java index 52a03dca95b36..ce0cc5c111265 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -16,20 +16,14 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.xpack.core.TestXPackTransportClient; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; -import org.elasticsearch.xpack.security.LocalStateSecurity; import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; @@ -38,8 +32,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; -import java.nio.file.Files; -import java.nio.file.Path; import java.security.AccessController; import java.security.PrivilegedAction; import java.security.SecureRandom; @@ -135,39 +127,6 @@ public void testThatHttpWorksWithSslClientAuth() throws IOException { } } - public void testThatTransportWorksWithoutSslClientAuth() throws IOException { - // specify an arbitrary key and certificate - not the certs needed to connect to the transport protocol - Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem"); - Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt"); - Path nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); - Path nodeEcCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt"); - - if (Files.notExists(keyPath) || Files.notExists(certPath)) { - throw new ElasticsearchException("key or certificate path doesn't exist"); - } - - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.security.transport.ssl.secure_key_passphrase", "testclient-client-profile"); - Settings settings = Settings.builder() - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.NONE) - .put("xpack.security.transport.ssl.key", keyPath) - .put("xpack.security.transport.ssl.certificate", certPath) - .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) - 
.putList("xpack.security.transport.ssl.certificate_authorities", nodeCertPath.toString(), nodeEcCertPath.toString()) - .setSecureSettings(secureSettings) - .put("cluster.name", internalCluster().getClusterName()) - .put(SecurityField.USER_SETTING.getKey(), transportClientUsername() + ":" + new String(transportClientPassword().getChars())) - .build(); - try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { - Transport transport = internalCluster().getDataNodeInstance(Transport.class); - TransportAddress transportAddress = transport.boundAddress().publishAddress(); - client.addTransportAddress(transportAddress); - - assertGreenClusterState(client); - } - } - private SSLContext getSSLContext() { try { String certPath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"; From 5cadfe7f119838276c91107a09607c3e9fc37a46 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 14 May 2019 15:34:00 +0200 Subject: [PATCH 095/321] Some Cleanup in o.e.gateway Package (#42108) * Removing obvious dead code * Removing redundant listener interface --- .../gateway/BaseGatewayShardAllocator.java | 2 +- .../gateway/DanglingIndicesState.java | 5 ++-- .../org/elasticsearch/gateway/Gateway.java | 9 +------ .../gateway/GatewayException.java | 8 ------ .../gateway/GatewayMetaState.java | 25 ++++++----------- .../elasticsearch/gateway/GatewayService.java | 5 ++-- .../gateway/LocalAllocateDangledIndices.java | 13 ++------- .../gateway/MetaStateService.java | 3 +-- .../gateway/PrimaryShardAllocator.java | 8 +++--- .../gateway/PriorityComparator.java | 4 +-- .../gateway/ReplicaShardAllocator.java | 14 +++++----- .../TransportNodesListGatewayMetaState.java | 22 --------------- ...ransportNodesListGatewayStartedShards.java | 11 +++----- .../java/org/elasticsearch/node/Node.java | 4 +-- .../gateway/GatewayServiceTests.java | 3 +-- .../gateway/MockGatewayMetaState.java | 6 ++--- .../indices/IndicesServiceTests.java | 27 +++++-------------- 17 files changed, 46 insertions(+), 123 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index 7e4172961ea1e..d8b96550ad01a 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -93,7 +93,7 @@ public abstract AllocateUnassignedDecision makeAllocationDecision(ShardRouting u * Builds decisions for all nodes in the cluster, so that the explain API can provide information on * allocation decisions for each node, while still waiting to allocate the shard (e.g. due to fetching shard data). 
*/ - protected List buildDecisionsForAllNodes(ShardRouting shard, RoutingAllocation allocation) { + protected static List buildDecisionsForAllNodes(ShardRouting shard, RoutingAllocation allocation) { List results = new ArrayList<>(); for (RoutingNode node : allocation.routingNodes()) { Decision decision = allocation.deciders().canAllocate(shard, node, allocation); diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 9a1c79b476e1b..fefd807d8d8a5 100644 --- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexGraveyard; @@ -162,14 +163,14 @@ private void allocateDanglingIndices() { } try { allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())), - new LocalAllocateDangledIndices.Listener() { + new ActionListener<>() { @Override public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { logger.trace("allocated dangled"); } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { logger.info("failed to send allocated dangled", e); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index dea544d40d55b..c59d52c60be7a 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -28,9 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; import java.util.Arrays; import java.util.function.Function; @@ -43,12 +41,7 @@ public class Gateway { private final TransportNodesListGatewayMetaState listGatewayMetaState; - private final IndicesService indicesService; - - public Gateway(final Settings settings, final ClusterService clusterService, - final TransportNodesListGatewayMetaState listGatewayMetaState, - final IndicesService indicesService) { - this.indicesService = indicesService; + public Gateway(final ClusterService clusterService, final TransportNodesListGatewayMetaState listGatewayMetaState) { this.clusterService = clusterService; this.listGatewayMetaState = listGatewayMetaState; } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayException.java b/server/src/main/java/org/elasticsearch/gateway/GatewayException.java index 32050f1c10e7d..380610a593675 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayException.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayException.java @@ -26,14 +26,6 @@ public class GatewayException extends ElasticsearchException { - public GatewayException(String msg) { - super(msg); - } - - public GatewayException(String msg, Throwable cause) { - super(msg, cause); - } - public GatewayException(StreamInput in) throws 
IOException { super(in); } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 30361fa70ee6b..91bcb68370ea1 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -44,9 +44,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.transport.TransportService; @@ -76,11 +74,9 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState.PersistedState { protected static final Logger logger = LogManager.getLogger(GatewayMetaState.class); - private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; private final Settings settings; private final ClusterService clusterService; - private final IndicesService indicesService; private final TransportService transportService; //there is a single thread executing updateClusterState calls, hence no volatile modifier @@ -88,16 +84,13 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState. protected ClusterState previousClusterState; protected boolean incrementalWrite; - public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, + public GatewayMetaState(Settings settings, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader, - TransportService transportService, ClusterService clusterService, - IndicesService indicesService) throws IOException { + TransportService transportService, ClusterService clusterService) throws IOException { this.settings = settings; - this.nodeEnv = nodeEnv; this.metaStateService = metaStateService; this.transportService = transportService; this.clusterService = clusterService; - this.indicesService = indicesService; upgradeMetaData(metaDataIndexUpgradeService, metaDataUpgrader); initializeClusterState(ClusterName.CLUSTER_NAME_SETTING.get(settings)); @@ -184,7 +177,7 @@ protected void upgradeMetaData(MetaDataIndexUpgradeService metaDataIndexUpgradeS } } - protected boolean isMasterOrDataNode() { + private boolean isMasterOrDataNode() { return DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings); } @@ -312,13 +305,12 @@ long writeIndex(String reason, IndexMetaData metaData) throws WriteStateExceptio } } - long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { + void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { assert finished == false : FINISHED_MSG; try { - long generation = metaStateService.writeManifestAndCleanup(reason, manifest); + metaStateService.writeManifestAndCleanup(reason, manifest); commitCleanupActions.forEach(Runnable::run); finished = true; - return generation; } catch (WriteStateException e) { // if Manifest write results in dirty WriteStateException it's not safe to remove // new metadata files, because if Manifest was actually written to disk and its deletion @@ -346,7 +338,7 @@ void rollback() { * * @throws WriteStateException if exception occurs. See also {@link WriteStateException#isDirty()}. 
*/ - protected void updateClusterState(ClusterState newState, ClusterState previousState) + private void updateClusterState(ClusterState newState, ClusterState previousState) throws WriteStateException { MetaData newMetaData = newState.metaData(); @@ -406,7 +398,7 @@ public static Set getRelevantIndices(ClusterState state, ClusterState pre } private static boolean isDataOnlyNode(ClusterState state) { - return ((state.nodes().getLocalNode().isMasterNode() == false) && state.nodes().getLocalNode().isDataNode()); + return state.nodes().getLocalNode().isMasterNode() == false && state.nodes().getLocalNode().isDataNode(); } /** @@ -535,8 +527,7 @@ private static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, C } private static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { - Set relevantIndices; - relevantIndices = new HashSet<>(); + Set relevantIndices = new HashSet<>(); // we have to iterate over the metadata to make sure we also capture closed indices for (IndexMetaData indexMetaData : state.metaData()) { relevantIndices.add(indexMetaData.getIndex()); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 3cc8ec167552c..b7b7d0759980e 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -93,7 +92,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste public GatewayService(final Settings settings, final AllocationService allocationService, final ClusterService clusterService, final ThreadPool threadPool, final TransportNodesListGatewayMetaState listGatewayMetaState, - final IndicesService indicesService, final Discovery discovery) { + final Discovery discovery) { this.allocationService = allocationService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -122,7 +121,7 @@ public GatewayService(final Settings settings, final AllocationService allocatio recoveryRunnable = () -> clusterService.submitStateUpdateTask("local-gateway-elected-state", new RecoverStateUpdateTask()); } else { - final Gateway gateway = new Gateway(settings, clusterService, listGatewayMetaState, indicesService); + final Gateway gateway = new Gateway(clusterService, listGatewayMetaState); recoveryRunnable = () -> gateway.performStateRecovery(new GatewayRecoveryListener()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index a5f4f77da438b..b51d16dbc5116 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -76,7 +77,7 @@ public 
LocalAllocateDangledIndices(TransportService transportService, ClusterSer new AllocateDangledRequestHandler()); } - public void allocateDangled(Collection indices, final Listener listener) { + public void allocateDangled(Collection indices, ActionListener listener) { ClusterState clusterState = clusterService.state(); DiscoveryNode masterNode = clusterState.nodes().getMasterNode(); if (masterNode == null) { @@ -110,12 +111,6 @@ public String executor() { }); } - public interface Listener { - void onResponse(AllocateDangledResponse response); - - void onFailure(Throwable e); - } - class AllocateDangledRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel, Task task) throws Exception { @@ -257,10 +252,6 @@ public static class AllocateDangledResponse extends TransportResponse { this.ack = ack; } - public boolean ack() { - return ack; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index d67cdccb9a09b..3bd8ba11a57ec 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -199,12 +199,11 @@ MetaData loadGlobalState() throws IOException { * * @throws WriteStateException if exception when writing state occurs. See also {@link WriteStateException#isDirty()} */ - public long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { + public void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException { logger.trace("[_meta] writing state, reason [{}]", reason); try { long generation = MANIFEST_FORMAT.writeAndCleanup(manifest, nodeEnv.nodeDataPaths()); logger.trace("[_meta] state written (generation: {})", generation); - return generation; } catch (WriteStateException ex) { throw new WriteStateException(ex.isDirty(), "[_meta]: failed to write meta state", ex); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 79030336acc02..d2e82d092e603 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -297,10 +297,10 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool /** * Split the list of node shard states into groups yes/no/throttle based on allocation deciders */ - private NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation, - List nodeShardStates, - ShardRouting shardRouting, - boolean forceAllocate) { + private static NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation, + List nodeShardStates, + ShardRouting shardRouting, + boolean forceAllocate) { List yesNodeShards = new ArrayList<>(); List throttledNodeShards = new ArrayList<>(); List noNodeShards = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java b/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java index 1d24baf561ab3..60bdc2434e972 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java @@ -56,11 +56,11 @@ public final int 
compare(ShardRouting o1, ShardRouting o2) { return cmp; } - private int priority(Settings settings) { + private static int priority(Settings settings) { return IndexMetaData.INDEX_PRIORITY_SETTING.get(settings); } - private long timeCreated(Settings settings) { + private static long timeCreated(Settings settings) { return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L); } diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 10bd6115b4c74..ce3cde3e6db71 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -243,8 +243,8 @@ public AllocateUnassignedDecision makeAllocationDecision(final ShardRouting unas * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element * in the returned tuple. */ - private Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard, - RoutingAllocation allocation) { + private static Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard, + RoutingAllocation allocation) { Decision madeDecision = Decision.NO; final boolean explain = allocation.debugDecision(); Map nodeDecisions = explain ? new HashMap<>() : null; @@ -260,7 +260,7 @@ private Tuple> canBeAllocatedToAtLea if (explain) { madeDecision = decision; } else { - return Tuple.tuple(decision, nodeDecisions); + return Tuple.tuple(decision, null); } } else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) { madeDecision = decision; @@ -276,8 +276,8 @@ private Tuple> canBeAllocatedToAtLea * Takes the store info for nodes that have a shard store and adds them to the node decisions, * leaving the node explanations untouched for those nodes that do not have any store information. */ - private List augmentExplanationsWithStoreInfo(Map nodeDecisions, - Map withShardStores) { + private static List augmentExplanationsWithStoreInfo(Map nodeDecisions, + Map withShardStores) { if (nodeDecisions == null || withShardStores == null) { return null; } @@ -295,8 +295,8 @@ private List augmentExplanationsWithStoreInfo(Map data) { + private static TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation, + AsyncShardFetch.FetchResult data) { assert shard.currentNodeId() != null; DiscoveryNode primaryNode = allocation.nodes().get(shard.currentNodeId()); if (primaryNode == null) { diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 477961c8a6d0c..ab0fad88ecdfa 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -94,23 +94,10 @@ public Request() { public Request(String... 
nodesIds) { super(nodesIds); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } public static class NodesGatewayMetaState extends BaseNodesResponse { - NodesGatewayMetaState() { - } - public NodesGatewayMetaState(ClusterName clusterName, List nodes, List failures) { super(clusterName, nodes, failures); } @@ -135,15 +122,6 @@ public NodeRequest() { super(nodeId); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } public static class NodeGatewayMetaState extends BaseNodeResponse { diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index c9e7100ebd66e..1893be3acd518 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -50,6 +50,7 @@ import java.io.IOException; import java.util.List; +import java.util.Objects; /** * This transport action is used to fetch the shard version from each node during primary allocation in {@link GatewayAllocator}. @@ -309,14 +310,8 @@ public boolean equals(Object o) { NodeGatewayStartedShards that = (NodeGatewayStartedShards) o; - if (primary != that.primary) { - return false; - } - if (allocationId != null ? !allocationId.equals(that.allocationId) : that.allocationId != null) { - return false; - } - return storeException != null ? storeException.equals(that.storeException) : that.storeException == null; - + return primary == that.primary && Objects.equals(allocationId, that.allocationId) + && Objects.equals(storeException, that.storeException); } @Override diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index f0ec86f8cb84c..782101763b7a4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -472,8 +472,8 @@ protected Node( ).collect(Collectors.toSet()); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders); - final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, nodeEnvironment, metaStateService, - metaDataIndexUpgradeService, metaDataUpgrader, transportService, clusterService, indicesService); + final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, metaStateService, + metaDataIndexUpgradeService, metaDataUpgrader, transportService, clusterService); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService(transportService, SearchExecutionStatsCollector.makeWrapper(responseCollectorService)); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index d6fd80f3513c2..4049cec796102 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -32,8 +32,7 @@ private GatewayService createService(final Settings.Builder settings) { final ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "GatewayServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null); - return new GatewayService(settings.build(), - null, clusterService, null, null, null, null); + return new GatewayService(settings.build(), null, clusterService, null, null, null); } public void testDefaultRecoverAfterTime() { diff --git a/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java index 7541ca860def6..7749a0edc37b8 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/server/src/test/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.transport.TransportService; @@ -46,10 +45,9 @@ public class MockGatewayMetaState extends GatewayMetaState { public MockGatewayMetaState(Settings settings, NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry, DiscoveryNode localNode) throws IOException { - super(settings, nodeEnvironment, new MetaStateService(nodeEnvironment, xContentRegistry), + super(settings, new MetaStateService(nodeEnvironment, xContentRegistry), mock(MetaDataIndexUpgradeService.class), mock(MetaDataUpgrader.class), - mock(TransportService.class), mock(ClusterService.class), - mock(IndicesService.class)); + mock(TransportService.class), mock(ClusterService.class)); this.localNode = localNode; } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 1ccf858ed1590..4a77160ce36d0 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.cluster.ClusterName; @@ -385,18 +386,18 @@ public void testDanglingIndicesWithAliasConflict() throws Exception { .numberOfShards(1) .numberOfReplicas(0) .build(); - DanglingListener listener = new DanglingListener(); - dangling.allocateDangled(Arrays.asList(indexMetaData), listener); - listener.latch.await(); + CountDownLatch latch = new CountDownLatch(1); + dangling.allocateDangled(Arrays.asList(indexMetaData), ActionListener.wrap(latch::countDown)); + latch.await(); assertThat(clusterService.state(), equalTo(originalState)); // remove the alias client().admin().indices().prepareAliases().removeAlias(indexName, alias).get(); // now try importing a dangling index with the same name as the alias, it should succeed. 
- listener = new DanglingListener(); - dangling.allocateDangled(Arrays.asList(indexMetaData), listener); - listener.latch.await(); + latch = new CountDownLatch(1); + dangling.allocateDangled(Arrays.asList(indexMetaData), ActionListener.wrap(latch::countDown)); + latch.await(); assertThat(clusterService.state(), not(originalState)); assertNotNull(clusterService.state().getMetaData().index(alias)); } @@ -431,20 +432,6 @@ public void testIndexAndTombstoneWithSameNameOnStartup() throws Exception { indicesService.verifyIndexIsDeleted(tombstonedIndex, clusterState); } - private static class DanglingListener implements LocalAllocateDangledIndices.Listener { - final CountDownLatch latch = new CountDownLatch(1); - - @Override - public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { - latch.countDown(); - } - - @Override - public void onFailure(Throwable e) { - latch.countDown(); - } - } - /** * Tests that the {@link MapperService} created by {@link IndicesService#createIndexMapperService(IndexMetaData)} contains * custom types and similarities registered by plugins From 32660118f4238a0047cf01ffd6be634034dba90c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 14 May 2019 16:10:31 -0400 Subject: [PATCH 096/321] Fix versionutils previous compatibility method (#42140) This commit fixes the behavior of the "previous compatible" utility method for version tests. It was supposed to not include the passed in version, but the version range method was inclusive. --- .../src/main/java/org/elasticsearch/test/VersionUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 4342daacd3158..8bfa0becaee94 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -263,6 +263,6 @@ public static Version randomPreviousCompatibleVersion(Random random, Version ver // TODO: change this to minimumCompatibilityVersion(), but first need to remove released/unreleased // versions so getPreviousVersion returns the *actual* previous version. Otherwise eg 8.0.0 returns say 7.0.2 for previous, // but 7.2.0 for minimum compat - return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), version); + return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), getPreviousVersion(version)); } } From c59da59b4b97b9d424574f0e4ebc1c01612642d1 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Tue, 14 May 2019 16:29:18 -0400 Subject: [PATCH 097/321] Concurrent tests wait for threads to be ready (#42083) This change updates tests that use a CountDownLatch to synchronize the running of threads when testing concurrent operations so that we ensure the thread has been fully created and run by the scheduler. Previously, these tests used a latch with a value of 1 and the test thread counted down while the threads performing concurrent operations just waited. This change updates the value of the latch to be 1 + the number of threads. Each thread counts down and then waits. This means that each thread has been constructed and has started running. All threads will have a common start point now.
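As an illustration of the start-gate pattern this commit describes (a latch sized to 1 + the number of threads, which every participant counts down and then awaits), the following minimal, self-contained sketch shows the idea outside the test framework. Class and variable names here are illustrative and not taken from the patch:

import java.util.concurrent.CountDownLatch;

public class StartGateSketch {
    public static void main(String[] args) throws InterruptedException {
        final int numThreads = 4;
        // One slot for the coordinating (test) thread plus one slot per worker.
        final CountDownLatch startGate = new CountDownLatch(1 + numThreads);
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < numThreads; i++) {
            threads[i] = new Thread(() -> {
                // Counting down proves this worker has been constructed and is running...
                startGate.countDown();
                try {
                    // ...and waiting lines it up with every other participant.
                    startGate.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                // The concurrent operation under test would start here.
            });
            threads[i].start();
        }
        startGate.countDown(); // the coordinating thread joins the gate as well
        startGate.await();     // returns only once every worker has reached the gate
        for (Thread thread : threads) {
            thread.join();
        }
    }
}

With a latch of 1 that only the test thread counts down, the gate can open before some workers have even been scheduled; sizing it to 1 + the number of threads means the gate opens only once every thread is demonstrably running, so the concurrent phase starts from a common point.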
--- .../elasticsearch/common/util/concurrent/CountDownTests.java | 3 ++- .../elasticsearch/common/util/concurrent/KeyedLockTests.java | 3 ++- .../org/elasticsearch/common/util/concurrent/RunOnceTests.java | 3 ++- .../org/elasticsearch/node/ResponseCollectorServiceTests.java | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java index 1a32064fe7daa..46021344fb73a 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTests.java @@ -34,12 +34,13 @@ public void testConcurrent() throws InterruptedException { final AtomicInteger count = new AtomicInteger(0); final CountDown countDown = new CountDown(scaledRandomIntBetween(10, 1000)); Thread[] threads = new Thread[between(3, 10)]; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(1 + threads.length); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { @Override public void run() { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java index e50e205ff1386..2160052619c11 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java @@ -45,8 +45,8 @@ public void testIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedExce for (int i = 0; i < names.length; i++) { names[i] = randomRealisticUnicodeOfLengthBetween(10, 20); } - CountDownLatch startLatch = new CountDownLatch(1); int numThreads = randomIntBetween(3, 10); + final CountDownLatch startLatch = new CountDownLatch(1 + numThreads); AcquireAndReleaseThread[] threads = new AcquireAndReleaseThread[numThreads]; for (int i = 0; i < numThreads; i++) { threads[i] = new AcquireAndReleaseThread(startLatch, connectionLock, names, counter, safeCounter); @@ -157,6 +157,7 @@ public AcquireAndReleaseThread(CountDownLatch startLatch, KeyedLock conn @Override public void run() { + startLatch.countDown(); try { startLatch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java index e833edc9d56b3..a41d37be2150a 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java @@ -45,9 +45,10 @@ public void testRunOnceConcurrently() throws InterruptedException { final RunOnce runOnce = new RunOnce(counter::incrementAndGet); final Thread[] threads = new Thread[between(3, 10)]; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(1 + threads.length); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread(() -> { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java index 5fedfa7869e8b..7ac254f9948f8 
100644 --- a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java @@ -77,9 +77,10 @@ public void testNodeStats() throws Exception { public void testConcurrentAddingAndRemoving() throws Exception { String[] nodes = new String[] {"a", "b", "c", "d"}; - final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch latch = new CountDownLatch(5); Runnable f = () -> { + latch.countDown(); try { latch.await(); } catch (InterruptedException e) { From 03e53e8a95859b3bb14ccf60f76f85726b55fe7d Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 15 May 2019 01:10:31 -0400 Subject: [PATCH 098/321] Cacheability improvements for thirdparty audit task (#42085) --- .../gradle/precommit/ThirdPartyAuditTask.java | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java index 8ec979420c0e4..e73a9d1e585e3 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java @@ -28,11 +28,13 @@ import org.gradle.api.file.FileTree; import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.Classpath; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; -import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.PathSensitive; import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; @@ -45,6 +47,7 @@ import java.net.URISyntaxException; import java.net.URL; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.util.Arrays; import java.util.Collections; import java.util.Set; @@ -113,7 +116,7 @@ public void setJavaHome(String javaHome) { this.javaHome = javaHome; } - @OutputDirectory + @Internal public File getJarExpandDir() { return new File( new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), @@ -121,6 +124,11 @@ public File getJarExpandDir() { ); } + @OutputFile + public File getSuccessMarker() { + return new File(getProject().getBuildDir(), "markers/" + getName()); + } + public void ignoreMissingClasses(String... classesOrPackages) { if (classesOrPackages.length == 0) { missingClassExcludes = null; @@ -157,8 +165,7 @@ public Set getMissingClassExcludes() { return missingClassExcludes; } - @InputFiles - @PathSensitive(PathSensitivity.NAME_ONLY) + @Classpath @SkipWhenEmpty public Set getJarsToScan() { // These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files, @@ -241,6 +248,10 @@ public void runThirdPartyAudit() throws IOException { } assertNoJarHell(jdkJarHellClasses); + + // Mark successful third party audit check + getSuccessMarker().getParentFile().mkdirs(); + Files.write(getSuccessMarker().toPath(), new byte[]{}); } private void logForbiddenAPIsOutput(String forbiddenApisOutput) { From ed3230b3ebbddbe0cf049c76ce42fde79f1b793c Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 15 May 2019 09:26:04 -0400 Subject: [PATCH 099/321] Minor cluster coordination docs fixes (#42111) Fixes a typo and a badly-formatted warning. 
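The success-marker idiom from the third-party audit patch above generalizes to any cacheable task whose natural output (there, a directory of expanded JARs) is too large or unstable to store in the build cache: declare an empty marker file as the task's only output and demote the scratch directory to an internal detail. A hypothetical Gradle task showing just that shape (names are illustrative; this is a sketch, not the actual ThirdPartyAuditTask):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import org.gradle.api.DefaultTask;
    import org.gradle.api.tasks.CacheableTask;
    import org.gradle.api.tasks.OutputFile;
    import org.gradle.api.tasks.TaskAction;

    @CacheableTask
    public class MarkerBackedCheckTask extends DefaultTask {

        // The empty marker is the task's only declared output, so a cache
        // entry is a few bytes instead of a directory of expanded JARs.
        @OutputFile
        public File getSuccessMarker() {
            return new File(getProject().getBuildDir(), "markers/" + getName());
        }

        @TaskAction
        public void check() throws IOException {
            // ... perform the real verification work here ...
            getSuccessMarker().getParentFile().mkdirs();
            Files.write(getSuccessMarker().toPath(), new byte[0]);
        }
    }

A subsequent build with unchanged inputs is then satisfied by replaying the marker from the cache, while the expand directory (annotated @Internal in the patch) no longer invalidates or bloats cache entries.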
--- docs/reference/modules/discovery/discovery-settings.asciidoc | 2 +- docs/reference/setup/important-settings/heap-size.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/modules/discovery/discovery-settings.asciidoc b/docs/reference/modules/discovery/discovery-settings.asciidoc index 541cb15bf1108..84472552cfced 100644 --- a/docs/reference/modules/discovery/discovery-settings.asciidoc +++ b/docs/reference/modules/discovery/discovery-settings.asciidoc @@ -31,7 +31,7 @@ Discovery and cluster formation are also affected by the following _expert-level_ settings, although it is not recommended to change any of these from their default values. -[WARNING] If you adjust these settings then your cluster may not form correctly +WARNING: If you adjust these settings then your cluster may not form correctly or may become unstable or intolerant of certain failures. `discovery.cluster_formation_warning_timeout`:: diff --git a/docs/reference/setup/important-settings/heap-size.asciidoc b/docs/reference/setup/important-settings/heap-size.asciidoc index 890a9786e09a5..37e417e086e9b 100644 --- a/docs/reference/setup/important-settings/heap-size.asciidoc +++ b/docs/reference/setup/important-settings/heap-size.asciidoc @@ -10,7 +10,7 @@ Elasticsearch will assign the entire heap specified in heap size) settings. You should set these two settings to be equal to each other. -The value for these setting depends on the amount of RAM available on your +The value for these settings depends on the amount of RAM available on your server: * Set `Xmx` and `Xms` to no more than 50% of your physical RAM. {es} requires From 4b28f5b57c0836b0cf99fe8351a41ddc21563972 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Wed, 15 May 2019 10:48:11 -0400 Subject: [PATCH 100/321] remove backcompat handling of 6.1.x versions (#42032) relates to refactoring initiative #41164. 
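The guard deleted throughout this patch is the standard wire-compatibility idiom: a field is written to and read from the transport stream only when the remote node is new enough to know about it. Condensed from the pre-change GetJobsAction.Request below (a sketch of the two methods only, not a complete class):

    import java.io.IOException;
    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    // Reader side: mirror the writer's guard, keeping the default for old senders.
    public Request(StreamInput in) throws IOException {
        super(in);
        jobId = in.readString();
        if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
            allowNoJobs = in.readBoolean();
        }
    }

    // Writer side: send the field only to nodes that understand it.
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(jobId);
        if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
            out.writeBoolean(allowNoJobs);
        }
    }

Once the oldest node a release can still talk to is at least 6.1.0, both branches are always taken, so each guard collapses to an unconditional read/write; that collapse, plus dropping the now-unreferenced V_6_1_x constants, is essentially the whole patch.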
--- .../percolator/PercolateQueryBuilder.java | 14 ++------ .../percolator/PercolatorFieldMapper.java | 6 ++-- .../PercolatorMatchedSlotSubFetchPhase.java | 2 +- .../percolator/CandidateQueryTests.java | 3 +- .../main/java/org/elasticsearch/Version.java | 20 ----------- .../index/mapper/FieldNamesFieldMapper.java | 6 +--- .../index/mapper/SeqNoFieldMapper.java | 7 ---- .../index/query/ExistsQueryBuilder.java | 29 --------------- .../index/query/ExistsQueryBuilderTests.java | 22 +----------- .../query/QueryStringQueryBuilderTests.java | 7 ++-- .../index/query/RangeQueryBuilderTests.java | 10 ++---- .../rest/yaml/section/SkipSectionTests.java | 5 +-- .../license/LicensesMetaData.java | 20 +++++------ .../xpack/core/ml/action/CloseJobAction.java | 9 ++--- .../core/ml/action/GetDatafeedsAction.java | 9 ++--- .../ml/action/GetDatafeedsStatsAction.java | 9 ++--- .../xpack/core/ml/action/GetJobsAction.java | 9 ++--- .../core/ml/action/GetJobsStatsAction.java | 8 ++--- .../core/ml/action/StopDatafeedAction.java | 9 ++--- .../xpack/core/ml/job/config/Job.java | 36 +++---------------- .../xpack/core/ml/job/config/JobUpdate.java | 22 +++--------- .../core/ml/job/config/JobUpdateTests.java | 5 +-- .../xpack/ml/MlConfigMigrator.java | 12 ------- .../ml/job/results/AutodetectResult.java | 25 +++++-------- .../action/TransportOpenJobActionTests.java | 12 ++++--- .../xpack/security/SecurityTests.java | 14 +++++--- 26 files changed, 74 insertions(+), 256 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index c883bb5893c9f..3021f5b31606e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -293,17 +293,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeVInt(documents.size()); - for (BytesReference document : documents) { - out.writeBytesReference(document); - } - } else { - if (documents.size() > 1) { - throw new IllegalArgumentException("Nodes prior to 6.1.0 cannot accept multiple documents"); - } - BytesReference doc = documents.isEmpty() ? 
null : documents.iterator().next(); - out.writeOptionalBytesReference(doc); + out.writeVInt(documents.size()); + for (BytesReference document : documents) { + out.writeBytesReference(document); } if (documents.isEmpty() == false) { out.writeEnum(documentXContentType); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 9d612c0c2926b..d2038e2e2bfde 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -285,7 +285,7 @@ Tuple createCandidateQuery(IndexReader indexReader, Versi } BooleanQuery.Builder candidateQuery = new BooleanQuery.Builder(); - if (canUseMinimumShouldMatchField && indexVersion.onOrAfter(Version.V_6_1_0)) { + if (canUseMinimumShouldMatchField) { LongValuesSource valuesSource = LongValuesSource.fromIntField(minimumShouldMatchField.name()); for (BytesRef extractedTerm : extractedTerms) { subQueries.add(new TermQuery(new Term(queryTermsField.name(), extractedTerm))); @@ -471,9 +471,7 @@ void processQuery(Query query, ParseContext context) { for (IndexableField field : fields) { context.doc().add(field); } - if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) { - doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.name(), result.minimumShouldMatch)); - } + doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.name(), result.minimumShouldMatch)); } static Query parseQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser) throws IOException { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index b29c12c48dc99..bcec2548de307 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -73,7 +73,7 @@ static void innerHitsExecute(Query mainQuery, String fieldName = singlePercolateQuery ? 
FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName(); IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher(); Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter()), - ScoreMode.COMPLETE_NO_SCORES, 1f); + ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0)); int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc(); BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 07f47df41e60d..b191dd948c574 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -97,6 +97,7 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -593,7 +594,7 @@ public void testRangeQueries() throws Exception { IndexSearcher shardSearcher = newSearcher(directoryReader); shardSearcher.setQueryCache(null); - Version v = Version.V_6_1_0; + Version v = VersionUtils.randomIndexCompatibleVersion(random()); MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 467a8b5cce177..dacb28f90b351 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,16 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_1_0_ID = 6010099; - public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_1_ID = 6010199; - public static final Version V_6_1_1 = new Version(V_6_1_1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_2_ID = 6010299; - public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_3_ID = 6010399; - public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); - public static final int V_6_1_4_ID = 6010499; - public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); // The below version is missing from the 7.3 JAR private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); public static final int V_6_2_0_ID = 6020099; @@ -196,16 +186,6 @@ public static Version fromId(int id) { return V_6_2_1; case V_6_2_0_ID: return V_6_2_0; - case V_6_1_4_ID: - return V_6_1_4; - case V_6_1_3_ID: - return V_6_1_3; - case V_6_1_2_ID: - return V_6_1_2; - case 
V_6_1_1_ID: - return V_6_1_1; - case V_6_1_0_ID: - return V_6_1_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 32c44fd5f55a0..12e53a5f9d4b9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; @@ -202,10 +201,7 @@ public void preParse(ParseContext context) { } @Override - public void postParse(ParseContext context) throws IOException { - if (context.indexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) { - super.parse(context); - } + public void postParse(ParseContext context) { } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 8c032402b5090..7eb8d180547c1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -255,15 +254,9 @@ public void postParse(ParseContext context) throws IOException { // we share the parent docs fields to ensure good compression SequenceIDFields seqID = context.seqID(); assert seqID != null; - final Version versionCreated = context.mapperService().getIndexSettings().getIndexVersionCreated(); - final boolean includePrimaryTerm = versionCreated.before(Version.V_6_1_0); for (Document doc : context.nonRootDocuments()) { doc.add(seqID.seqNo); doc.add(seqID.seqNoDocValue); - if (includePrimaryTerm) { - // primary terms are used to distinguish between parent and nested docs since 6.1.0 - doc.add(seqID.primaryTerm); - } } } diff --git a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index 7a2373e5ad8b5..c5894a3e1c018 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -19,14 +19,11 @@ package org.elasticsearch.index.query; -import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -148,10 +145,6 @@ public static Query newFilter(QueryShardContext context, String fieldPattern) { fields = 
context.simpleMatchToIndexNames(fieldPattern); } - if (context.indexVersionCreated().before(Version.V_6_1_0)) { - return newLegacyExistsQuery(context, fields); - } - if (fields.size() == 1) { String field = fields.iterator().next(); return newFieldExistsQuery(context, field); @@ -164,28 +157,6 @@ public static Query newFilter(QueryShardContext context, String fieldPattern) { return new ConstantScoreQuery(boolFilterBuilder.build()); } - private static Query newLegacyExistsQuery(QueryShardContext context, Collection fields) { - // We create TermsQuery directly here rather than using FieldNamesFieldType.termsQuery() - // so we don't end up with deprecation warnings - if (fields.size() == 1) { - Query filter = newLegacyExistsQuery(context, fields.iterator().next()); - return new ConstantScoreQuery(filter); - } - - BooleanQuery.Builder boolFilterBuilder = new BooleanQuery.Builder(); - for (String field : fields) { - Query filter = newLegacyExistsQuery(context, field); - boolFilterBuilder.add(filter, BooleanClause.Occur.SHOULD); - } - return new ConstantScoreQuery(boolFilterBuilder.build()); - } - - private static Query newLegacyExistsQuery(QueryShardContext context, String field) { - MappedFieldType fieldType = context.fieldMapper(field); - String fieldName = fieldType != null ? fieldType.name() : field; - return new TermQuery(new Term(FieldNamesFieldMapper.NAME, fieldName)); - } - private static Query newFieldExistsQuery(QueryShardContext context, String field) { MappedFieldType fieldType = context.getMapperService().fullName(field); if (fieldType == null) { diff --git a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index a5329856630d5..47bd8d8a34c14 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -65,26 +64,7 @@ protected void doAssertLuceneQuery(ExistsQueryBuilder queryBuilder, Query query, Collection fields = context.getQueryShardContext().simpleMatchToIndexNames(fieldPattern); Collection mappedFields = fields.stream().filter((field) -> context.getQueryShardContext().getObjectMapper(field) != null || context.getQueryShardContext().getMapperService().fullName(field) != null).collect(Collectors.toList()); - if (context.mapperService().getIndexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) { - if (fields.size() == 1) { - assertThat(query, instanceOf(ConstantScoreQuery.class)); - ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query; - String field = expectedFieldName(fields.iterator().next()); - assertThat(constantScoreQuery.getQuery(), instanceOf(TermQuery.class)); - TermQuery termQuery = (TermQuery) constantScoreQuery.getQuery(); - assertEquals(field, termQuery.getTerm().text()); - } else { - assertThat(query, instanceOf(ConstantScoreQuery.class)); - ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query; - assertThat(constantScoreQuery.getQuery(), instanceOf(BooleanQuery.class)); - BooleanQuery booleanQuery = (BooleanQuery) constantScoreQuery.getQuery(); - assertThat(booleanQuery.clauses().size(), 
equalTo(mappedFields.size())); - for (int i = 0; i < mappedFields.size(); i++) { - BooleanClause booleanClause = booleanQuery.clauses().get(i); - assertThat(booleanClause.getOccur(), equalTo(BooleanClause.Occur.SHOULD)); - } - } - } else if (fields.size() == 1 && mappedFields.size() == 0) { + if (fields.size() == 1 && mappedFields.size() == 0) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query; assertThat(matchNoDocsQuery.toString(null), diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 5ace39c0890df..001df6deb5647 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -50,7 +50,6 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -1033,8 +1032,7 @@ public void testExistsFieldQuery() throws Exception { QueryShardContext context = createShardContext(); QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder(STRING_FIELD_NAME + ":*"); Query query = queryBuilder.toQuery(context); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { + if (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false) { assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); @@ -1044,8 +1042,7 @@ public void testExistsFieldQuery() throws Exception { String value = (quoted ? "\"" : "") + STRING_FIELD_NAME + (quoted ? 
"\"" : ""); queryBuilder = new QueryStringQueryBuilder("_exists_:" + value); query = queryBuilder.toQuery(context); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { + if (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false) { assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 30780d5c49018..d270a8c7113b5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -32,7 +32,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; @@ -139,11 +138,9 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, String expectedFieldName = expectedFieldName(queryBuilder.fieldName()); if (queryBuilder.from() == null && queryBuilder.to() == null) { final Query expectedQuery; - if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && context.mapperService().fullName(queryBuilder.fieldName()).hasDocValues()) { + if (context.mapperService().fullName(queryBuilder.fieldName()).hasDocValues()) { expectedQuery = new ConstantScoreQuery(new DocValuesFieldExistsQuery(expectedFieldName)); - } else if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) && - context.mapperService().fullName(queryBuilder.fieldName()).omitNorms() == false) { + } else if (context.mapperService().fullName(queryBuilder.fieldName()).omitNorms() == false) { expectedQuery = new ConstantScoreQuery(new NormsFieldExistsQuery(expectedFieldName)); } else { expectedQuery = new ConstantScoreQuery(new TermQuery(new Term(FieldNamesFieldMapper.NAME, expectedFieldName))); @@ -425,8 +422,7 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC // Range query with open bounds rewrite to an exists query Query luceneQuery = rewrittenRange.toQuery(queryShardContext); final Query expectedQuery; - if (queryShardContext.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) - && queryShardContext.fieldMapper(query.fieldName()).hasDocValues()) { + if (queryShardContext.fieldMapper(query.fieldName()).hasDocValues()) { expectedQuery = new ConstantScoreQuery(new DocValuesFieldExistsQuery(query.fieldName())); } else { expectedQuery = new ConstantScoreQuery(new TermQuery(new Term(FieldNamesFieldMapper.NAME, query.fieldName()))); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index 2e5081cab34eb..e92ef2ce13576 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ 
-54,15 +54,16 @@ public void testMessage() { } public void testParseSkipSectionVersionNoFeature() throws Exception { + Version version = VersionUtils.randomVersion(random()); parser = createParser(YamlXContent.yamlXContent, - "version: \" - 6.1.1\"\n" + + "version: \" - " + version + "\"\n" + "reason: Delete ignores the parent param" ); SkipSection skipSection = SkipSection.parse(parser); assertThat(skipSection, notNullValue()); assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.V_6_1_1)); + assertThat(skipSection.getUpperVersion(), equalTo(version)); assertThat(skipSection.getFeatures().size(), equalTo(0)); assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java index 6d001dea516ac..f131b24252e5b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java @@ -166,13 +166,11 @@ public void writeTo(StreamOutput streamOutput) throws IOException { streamOutput.writeBoolean(true); // has a license license.writeTo(streamOutput); } - if (streamOutput.getVersion().onOrAfter(Version.V_6_1_0)) { - if (trialVersion == null) { - streamOutput.writeBoolean(false); - } else { - streamOutput.writeBoolean(true); - Version.writeVersion(trialVersion, streamOutput); - } + if (trialVersion == null) { + streamOutput.writeBoolean(false); + } else { + streamOutput.writeBoolean(true); + Version.writeVersion(trialVersion, streamOutput); } } @@ -182,11 +180,9 @@ public LicensesMetaData(StreamInput streamInput) throws IOException { } else { license = LICENSE_TOMBSTONE; } - if (streamInput.getVersion().onOrAfter(Version.V_6_1_0)) { - boolean hasExercisedTrial = streamInput.readBoolean(); - if (hasExercisedTrial) { - this.trialVersion = Version.readVersion(streamInput); - } + boolean hasExercisedTrial = streamInput.readBoolean(); + if (hasExercisedTrial) { + this.trialVersion = Version.readVersion(streamInput); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java index 9bc413b2e220f..95ec597bb9cd8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksRequest; @@ -91,9 +90,7 @@ public Request(StreamInput in) throws IOException { force = in.readBoolean(); openJobIds = in.readStringArray(); local = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoJobs = in.readBoolean(); - } + allowNoJobs = in.readBoolean(); } @Override @@ -104,9 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(force); out.writeStringArray(openJobIds); out.writeBoolean(local); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoJobs); - } + out.writeBoolean(allowNoJobs); } public Request(String jobId) { diff 
--git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java index 59589fa34ef9d..950fa58af95c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -58,18 +57,14 @@ public Request() { public Request(StreamInput in) throws IOException { super(in); datafeedId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoDatafeeds = in.readBoolean(); - } + allowNoDatafeeds = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(datafeedId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoDatafeeds); - } + out.writeBoolean(allowNoDatafeeds); } public String getDatafeedId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java index 6dbb86fbcd082..39055501444f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -62,18 +61,14 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); datafeedId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoDatafeeds = in.readBoolean(); - } + allowNoDatafeeds = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(datafeedId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoDatafeeds); - } + out.writeBoolean(allowNoDatafeeds); } public String getDatafeedId() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java index 18428eff13758..98b1eb7a118f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; @@ -56,18 +55,14 @@ public Request() { public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoJobs = 
in.readBoolean(); - } + allowNoJobs = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoJobs); - } + out.writeBoolean(allowNoJobs); } public void setAllowNoJobs(boolean allowNoJobs) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index d4c7124af3238..17de9dfc3522c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -85,9 +85,7 @@ public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); expandedJobsIds = in.readStringList(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoJobs = in.readBoolean(); - } + allowNoJobs = in.readBoolean(); } @Override @@ -95,9 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); out.writeStringCollection(expandedJobsIds); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoJobs); - } + out.writeBoolean(allowNoJobs); } public List getExpandedJobsIds() { return expandedJobsIds; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java index c914150173b4a..0021040c69801 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; @@ -96,9 +95,7 @@ public Request(StreamInput in) throws IOException { resolvedStartedDatafeedIds = in.readStringArray(); stopTimeout = in.readTimeValue(); force = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - allowNoDatafeeds = in.readBoolean(); - } + allowNoDatafeeds = in.readBoolean(); } public String getDatafeedId() { @@ -160,9 +157,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(resolvedStartedDatafeedIds); out.writeTimeValue(stopTimeout); out.writeBoolean(force); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(allowNoDatafeeds); - } + out.writeBoolean(allowNoDatafeeds); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index de37702fe5246..1cb44f9625cb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -186,11 +186,7 @@ public Job(StreamInput in) throws IOException { jobId = in.readString(); jobType = in.readString(); jobVersion = in.readBoolean() ? 
Version.readVersion(in) : null; - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - groups = Collections.unmodifiableList(in.readStringList()); - } else { - groups = Collections.emptyList(); - } + groups = Collections.unmodifiableList(in.readStringList()); description = in.readOptionalString(); createTime = new Date(in.readVLong()); finishedTime = in.readBoolean() ? new Date(in.readVLong()) : null; @@ -200,10 +196,6 @@ public Job(StreamInput in) throws IOException { in.readVLong(); } } - // for removed establishedModelMemory field - if (in.getVersion().onOrAfter(Version.V_6_1_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalLong(); - } analysisConfig = new AnalysisConfig(in); analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); dataDescription = in.readOptionalWriteable(DataDescription::new); @@ -449,9 +441,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeStringCollection(groups); - } + out.writeStringCollection(groups); out.writeOptionalString(description); out.writeVLong(createTime.getTime()); if (finishedTime != null) { @@ -464,10 +454,6 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); } - // for removed establishedModelMemory field - if (out.getVersion().onOrAfter(Version.V_6_1_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalLong(null); - } analysisConfig.writeTo(out); out.writeOptionalWriteable(analysisLimits); out.writeOptionalWriteable(dataDescription); @@ -676,11 +662,7 @@ public Builder(StreamInput in) throws IOException { id = in.readOptionalString(); jobType = in.readString(); jobVersion = in.readBoolean() ? Version.readVersion(in) : null; - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - groups = in.readStringList(); - } else { - groups = Collections.emptyList(); - } + groups = in.readStringList(); description = in.readOptionalString(); createTime = in.readBoolean() ? new Date(in.readVLong()) : null; finishedTime = in.readBoolean() ? 
new Date(in.readVLong()) : null; @@ -690,10 +672,6 @@ public Builder(StreamInput in) throws IOException { in.readVLong(); } } - // for removed establishedModelMemory field - if (in.getVersion().onOrAfter(Version.V_6_1_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalLong(); - } analysisConfig = in.readOptionalWriteable(AnalysisConfig::new); analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); dataDescription = in.readOptionalWriteable(DataDescription::new); @@ -861,9 +839,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeStringCollection(groups); - } + out.writeStringCollection(groups); out.writeOptionalString(description); if (createTime != null) { out.writeBoolean(true); @@ -881,10 +857,6 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); } - // for removed establishedModelMemory field - if (out.getVersion().onOrAfter(Version.V_6_1_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalLong(null); - } out.writeOptionalWriteable(analysisConfig); out.writeOptionalWriteable(analysisLimits); out.writeOptionalWriteable(dataDescription); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 36e1fc1096675..81a0e017c6584 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -106,12 +106,8 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String public JobUpdate(StreamInput in) throws IOException { jobId = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - String[] groupsArray = in.readOptionalStringArray(); - groups = groupsArray == null ? null : Arrays.asList(groupsArray); - } else { - groups = null; - } + String[] groupsArray = in.readOptionalStringArray(); + groups = groupsArray == null ? null : Arrays.asList(groupsArray); description = in.readOptionalString(); if (in.readBoolean()) { detectorUpdates = in.readList(DetectorUpdate::new); @@ -131,10 +127,6 @@ public JobUpdate(StreamInput in) throws IOException { } customSettings = in.readMap(); modelSnapshotId = in.readOptionalString(); - // was establishedModelMemory - if (in.getVersion().onOrAfter(Version.V_6_1_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readOptionalLong(); - } if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.readBoolean()) { jobVersion = Version.readVersion(in); } else { @@ -155,10 +147,8 @@ public JobUpdate(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - String[] groupsArray = groups == null ? null : groups.toArray(new String[groups.size()]); - out.writeOptionalStringArray(groupsArray); - } + String[] groupsArray = groups == null ? 
null : groups.toArray(new String[groups.size()]); + out.writeOptionalStringArray(groupsArray); out.writeOptionalString(description); out.writeBoolean(detectorUpdates != null); if (detectorUpdates != null) { @@ -176,10 +166,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeMap(customSettings); out.writeOptionalString(modelSnapshotId); - // was establishedModelMemory - if (out.getVersion().onOrAfter(Version.V_6_1_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalLong(null); - } if (out.getVersion().onOrAfter(Version.V_6_3_0)) { if (jobVersion != null) { out.writeBoolean(true); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index e249b22a4a896..eb4f2c0bbc2da 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -91,7 +92,7 @@ public JobUpdate createRandom(String jobId, @Nullable Job job) { update.setModelSnapshotMinVersion(Version.CURRENT); } if (useInternalParser && randomBoolean()) { - update.setJobVersion(randomFrom(Version.CURRENT, Version.V_6_2_0, Version.V_6_1_0)); + update.setJobVersion(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); } if (useInternalParser) { update.setClearFinishTime(randomBoolean()); @@ -213,7 +214,7 @@ public void testMergeWithJob() { updateBuilder.setCategorizationFilters(categorizationFilters); updateBuilder.setCustomSettings(customSettings); updateBuilder.setModelSnapshotId(randomAlphaOfLength(10)); - updateBuilder.setJobVersion(Version.V_6_1_0); + updateBuilder.setJobVersion(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); JobUpdate update = updateBuilder.build(); Job.Builder jobBuilder = new Job.Builder("foo"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java index d1673dd3c914c..1d230d93792fc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -40,7 +40,6 @@ import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -513,18 +512,7 @@ public static Job updateJobForMigration(Job job) { String version = job.getJobVersion() != null ? 
job.getJobVersion().toString() : null; custom.put(MIGRATED_FROM_VERSION, version); builder.setCustomSettings(custom); - // Increase the model memory limit for 6.1 - 6.3 jobs Version jobVersion = job.getJobVersion(); - if (jobVersion != null && jobVersion.onOrAfter(Version.V_6_1_0) && jobVersion.before(Version.V_6_3_0)) { - // Increase model memory limit if < 512MB - if (job.getAnalysisLimits() != null && job.getAnalysisLimits().getModelMemoryLimit() != null && - job.getAnalysisLimits().getModelMemoryLimit() < 512L) { - long updatedModelMemoryLimit = (long) (job.getAnalysisLimits().getModelMemoryLimit() * 1.3); - AnalysisLimits limits = new AnalysisLimits(updatedModelMemoryLimit, - job.getAnalysisLimits().getCategorizationExamplesLimit()); - builder.setAnalysisLimits(limits); - } - } // Pre v5.5 (ml beta) jobs do not have a version. // These jobs cannot be opened, we rely on the missing version // to indicate this. diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java index f3afa98b55a46..b831f1b0aee62 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -133,19 +132,14 @@ public AutodetectResult(StreamInput in) throws IOException { this.flushAcknowledgement = null; } - if (in.getVersion().onOrAfter(Version.V_6_1_0)) { - if (in.readBoolean()) { - this.forecast = new Forecast(in); - } else { - this.forecast = null; - } - if (in.readBoolean()) { - this.forecastRequestStats = new ForecastRequestStats(in); - } else { - this.forecastRequestStats = null; - } + if (in.readBoolean()) { + this.forecast = new Forecast(in); } else { this.forecast = null; + } + if (in.readBoolean()) { + this.forecastRequestStats = new ForecastRequestStats(in); + } else { this.forecastRequestStats = null; } } @@ -161,11 +155,8 @@ public void writeTo(StreamOutput out) throws IOException { writeNullable(modelPlot, out); writeNullable(categoryDefinition, out); writeNullable(flushAcknowledgement, out); - - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - writeNullable(forecast, out); - writeNullable(forecastRequestStats, out); - } + writeNullable(forecast, out); + writeNullable(forecastRequestStats, out); } private static void writeNullable(Writeable writeable, StreamOutput out) throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 040ed5e1d0ed4..92d7bbcc49e54 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlTasks; import 
org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -370,11 +371,14 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); + Version node1Version = VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion(Version.CURRENT)); + Version node2Version = randomValueOtherThan(node1Version, + () -> VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion(Version.CURRENT))); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), node1Version)) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_1_0)) + nodeAttr, Collections.emptySet(), node2Version)) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -386,7 +390,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() Job job = BaseMlIntegTestCase.createFareQuoteJob("job_with_incompatible_model_snapshot") .setModelSnapshotId("incompatible_snapshot") - .setModelSnapshotMinVersion(Version.V_6_3_0) + .setModelSnapshotMinVersion(Version.CURRENT) .build(new Date()); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); @@ -394,7 +398,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_incompatible_model_snapshot", job, cs.build(), 10, 2, 30, memoryTracker, isMemoryTrackerRecentlyRefreshed, logger); assertThat(result.getExplanation(), containsString( - "because the job's model snapshot requires a node of version [6.3.0] or higher")); + "because the job's model snapshot requires a node of version [" + Version.CURRENT + "] or higher")); assertNull(result.getExecutorNode()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 02db3a1e11a46..3f880b21ccc57 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -295,12 +295,15 @@ public void testIndexJoinValidator_Old_And_Rolling() throws Exception { createComponents(Settings.EMPTY); BiConsumer joinValidator = security.getJoinValidator(); assertNotNull(joinValidator); + Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_7_0_0)); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT - 1)) + .settings(settings(version) + .put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT - 1)) .numberOfShards(1).numberOfReplicas(0) .build(); - DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); + DiscoveryNode 
existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), version); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodes) @@ -318,7 +321,7 @@ public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); int indexFormat = randomBoolean() ? INTERNAL_MAIN_INDEX_FORMAT : INTERNAL_MAIN_INDEX_FORMAT - 1; IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) + .settings(settings(VersionUtils.randomIndexCompatibleVersion(random())).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) .numberOfShards(1).numberOfReplicas(0) .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.CURRENT); @@ -333,7 +336,7 @@ public void testIndexUpgradeValidatorWithUpToDateIndex() throws Exception { createComponents(Settings.EMPTY); BiConsumer joinValidator = security.getJoinValidator(); assertNotNull(joinValidator); - Version version = randomBoolean() ? Version.CURRENT : Version.V_6_1_0; + Version version = VersionUtils.randomIndexCompatibleVersion(random()); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT)) @@ -352,7 +355,8 @@ public void testIndexUpgradeValidatorWithMissingIndex() throws Exception { BiConsumer joinValidator = security.getJoinValidator(); assertNotNull(joinValidator); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); - DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), + VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodes).build(); From 3ac6d527a1386d19008cdd08cdbfef265da30f00 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 15 May 2019 10:50:54 -0400 Subject: [PATCH 101/321] Docs: Mark SQL Geo functionality as beta (#42138) Adds beta marker to geosql documentation --- docs/reference/sql/functions/geo.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index f5ed716eaeb29..112ddfffce6ed 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -3,6 +3,8 @@ [[sql-functions-geo]] === Geo Functions +beta[] + The geo functions work with geometries stored in `geo_point` and `geo_shape` fields, or returned by other geo functions. ==== Limitations From 7e0ffaebaf1004c959766b905987fcb5f4bf7f70 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Thu, 16 May 2019 03:53:34 +1000 Subject: [PATCH 102/321] Enforce transport TLS on Basic with Security (#42150) If a basic license enables security, then we should also enforce TLS on the transport interface. This was already the case for Standard/Gold/Platinum licenses. 
For Basic, security defaults to disabled, so some of the process around checking whether security is actually enabled is more complex now that we need to account for basic licenses.
--- .../org/elasticsearch/license/License.java | 18 --- .../elasticsearch/license/LicenseService.java | 5 +- .../license/XPackLicenseState.java | 25 +++- .../core/ssl/TLSLicenseBootstrapCheck.java | 10 +- .../ssl/TLSLicenseBootstrapCheckTests.java | 123 ++++++++++++++---- .../xpack/security/Security.java | 15 ++- .../xpack/security/SecurityTests.java | 43 ++++-- 7 files changed, 177 insertions(+), 62 deletions(-)
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index 62ffd76e8ea05..e39b5b7dcc196 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -780,22 +780,4 @@ public Builder validate() { } } - /** - * Returns true iff the license is a production licnese - */ - public boolean isProductionLicense() { - switch (operationMode()) { - case MISSING: - case TRIAL: - case BASIC: - return false; - case STANDARD: - case GOLD: - case PLATINUM: - return true; - default: - throw new AssertionError("unknown operation mode: " + operationMode()); - - } - } }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 837caf2da070b..f750d1349a0ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -218,10 +218,13 @@ public void registerLicense(final PutLicenseRequest request, final ActionListene } } + // This check would be incorrect if "basic" licenses were allowed here + // because the defaults there mean that security can be "off", even if the setting is "on" + // BUT basic licenses are explicitly excluded earlier in this method, so we don't need to worry if (XPackSettings.SECURITY_ENABLED.get(settings)) { // TODO we should really validate that all nodes have xpack installed and are consistently configured but this // should happen on a different level and not in this code - if (newLicense.isProductionLicense() + if (XPackLicenseState.isTransportTlsRequired(newLicense, settings) && XPackSettings.TRANSPORT_SSL_ENABLED.get(settings) == false && isProductionMode(settings, clusterService.localNode())) { // security is on but TLS is not configured we gonna fail the entire request and throw an exception
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 131069d27f628..e206ed3db5149 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -282,7 +282,7 @@ private static class Status { public XPackLicenseState(Settings settings) { this.listeners = new CopyOnWriteArrayList<>(); this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - this.isSecurityExplicitlyEnabled = isSecurityEnabled && settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + this.isSecurityExplicitlyEnabled = isSecurityEnabled && isSecurityExplicitlyEnabled(settings); } private XPackLicenseState(XPackLicenseState
xPackLicenseState) { @@ -292,6 +292,10 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) { this.status = xPackLicenseState.status; } + private static boolean isSecurityExplicitlyEnabled(Settings settings) { + return settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + } + /** * Updates the current state of the license, which will change what features are available. * @@ -727,6 +731,25 @@ public synchronized boolean isSecurityDisabledByLicenseDefaults() { return false; } + public static boolean isTransportTlsRequired(License license, Settings settings) { + if (license == null) { + return false; + } + switch (license.operationMode()) { + case STANDARD: + case GOLD: + case PLATINUM: + return XPackSettings.SECURITY_ENABLED.get(settings); + case BASIC: + return XPackSettings.SECURITY_ENABLED.get(settings) && isSecurityExplicitlyEnabled(settings); + case MISSING: + case TRIAL: + return false; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } + } + private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled, final boolean isSecurityEnabled) { switch (mode) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java index 6f6592bbdfca2..a042aeb4a2359 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java @@ -9,6 +9,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.XPackSettings; /** @@ -19,10 +20,11 @@ public final class TLSLicenseBootstrapCheck implements BootstrapCheck { public BootstrapCheckResult check(BootstrapContext context) { if (XPackSettings.TRANSPORT_SSL_ENABLED.get(context.settings()) == false) { License license = LicenseService.getLicense(context.metaData()); - if (license != null && license.isProductionLicense()) { - return BootstrapCheckResult.failure("Transport SSL must be enabled for setups with production licenses. Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting [xpack.security.enabled] " + - "to [false]"); + if (XPackLicenseState.isTransportTlsRequired(license, context.settings())) { + return BootstrapCheckResult.failure("Transport SSL must be enabled if security is enabled on a [" + + license.operationMode().description() + "] license. 
" + + "Please set [xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]"); } } return BootstrapCheckResult.success(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java index ac73418800c77..3cb14180930d3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java @@ -5,40 +5,115 @@ */ package org.elasticsearch.xpack.core.ssl; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.License; +import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.license.TestUtils; import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import java.util.EnumSet; - public class TLSLicenseBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - public void testBootstrapCheck() throws Exception { + public void testBootstrapCheckOnEmptyMetadata() { assertTrue(new TLSLicenseBootstrapCheck().check(emptyContext).isSuccess()); assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext(Settings.builder().put("xpack.security.transport.ssl.enabled" - , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); - int numIters = randomIntBetween(1,10); - for (int i = 0; i < numIters; i++) { - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - MetaData.Builder builder = MetaData.builder(); - TestUtils.putLicense(builder, license); - MetaData build = builder.build(); - if (productionModes.contains(license.operationMode()) == false) { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", true).build(), build)).isSuccess()); - } else { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).isFailure()); - assertEquals("Transport SSL must be enabled for setups with production licenses. 
Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + - "[xpack.security.enabled] to [false]", - new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).getMessage()); - } + , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); + } + + public void testBootstrapCheckFailureOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + if (randomBoolean()) { + // randomise between default-true & explicit-true + settings.put("xpack.security.enabled", true); + } + + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [" + mode.description() + "] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapCheckSucceedsWithTlsEnabledOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertSuccess(result); + } + + public void testBootstrapCheckFailureOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.enabled", true); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [basic] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapSucceedsIfSecurityIsNotEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.enabled", false); + } + if (randomBoolean()) { + // it does not matter whether or not this is set, as security is not enabled. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); } + + public void testBootstrapSucceedsIfTlsIsEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + if (randomBoolean()) { + // it does not matter whether or not this is set, as TLS is enabled. 
+ settings.put("xpack.security.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); + } + + public void testBootstrapCheckAlwaysSucceedsOnTrialLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.enabled", randomBoolean()); + } + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.TRIAL, settings); + assertSuccess(result); + } + + public BootstrapCheck.BootstrapCheckResult runBootstrapCheck(OperationMode mode, Settings.Builder settings) throws Exception { + final License license = TestUtils.generateSignedLicense(mode.description(), TimeValue.timeValueHours(24)); + MetaData.Builder builder = MetaData.builder(); + TestUtils.putLicense(builder, license); + MetaData metaData = builder.build(); + final BootstrapContext context = createTestContext(settings.build(), metaData); + return new TLSLicenseBootstrapCheck().check(context); + } + + public void assertSuccess(BootstrapCheck.BootstrapCheckResult result) { + if (result.isFailure()) { + fail("Bootstrap check failed unexpectedly: " + result.getMessage()); + } + } + } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index a36a004c7f413..a6218522fb7e5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -258,8 +258,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7; public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin, ClusterPlugin, @@ -1002,7 +1002,7 @@ public Function> getFieldFilter() { public BiConsumer getJoinValidator() { if (enabled) { return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), - DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) + DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings), settings) .andThen(new ValidateUpgradedSecurityIndex()) .andThen(new ValidateLicenseForFIPS(XPackSettings.FIPS_MODE_ENABLED.get(settings))); } @@ -1012,18 +1012,21 @@ public BiConsumer getJoinValidator() { static final class ValidateTLSOnJoin implements BiConsumer { private final boolean isTLSEnabled; private final String discoveryType; + private final Settings settings; - ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType) { + ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType, Settings settings) { this.isTLSEnabled = isTLSEnabled; 
this.discoveryType = discoveryType; + this.settings = settings; } @Override public void accept(DiscoveryNode node, ClusterState state) { License license = LicenseService.getLicense(state.metaData()); - if (license != null && license.isProductionLicense() && - isTLSEnabled == false && "single-node".equals(discoveryType) == false) { - throw new IllegalStateException("TLS setup is required for license type [" + license.operationMode().name() + "]"); + if (isTLSEnabled == false && "single-node".equals(discoveryType) == false + && XPackLicenseState.isTransportTlsRequired(license, settings)) { + throw new IllegalStateException("Transport TLS ([" + XPackSettings.TRANSPORT_SSL_ENABLED.getKey() + + "]) is required for license type [" + license.operationMode().description() + "] when security is enabled"); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 3f880b21ccc57..8e6e00f32a90e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -54,7 +54,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -67,8 +66,8 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; -import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -253,17 +252,45 @@ public void testTLSJoinValidator() throws Exception { int numIters = randomIntBetween(1, 10); for (int i = 0; i < numIters; i++) { boolean tlsOn = randomBoolean(); + boolean securityExplicitlyEnabled = randomBoolean(); String discoveryType = randomFrom("single-node", ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); - Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType); + + final Settings settings; + if (securityExplicitlyEnabled) { + settings = Settings.builder().put("xpack.security.enabled", true).build(); + } else { + settings = Settings.EMPTY; + } + Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType, settings); MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + License.OperationMode licenseMode = randomFrom(License.OperationMode.values()); + License license = TestUtils.generateSignedLicense(licenseMode.description(), TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); - EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - if (productionModes.contains(license.operationMode()) && tlsOn == false && "single-node".equals(discoveryType) == false) { + + final boolean expectFailure; + 
switch (licenseMode) { + case PLATINUM: + case GOLD: + case STANDARD: + expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false; + break; + case BASIC: + expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false && securityExplicitlyEnabled; + break; + case MISSING: + case TRIAL: + expectFailure = false; + break; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } + logger.info("Test TLS join; Lic:{} TLS:{} Disco:{} Settings:{} ; Expect Failure: {}", + licenseMode, tlsOn, discoveryType, settings.toDelimitedString(','), expectFailure); + if (expectFailure) { IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state)); - assertEquals("TLS setup is required for license type [" + license.operationMode().name() + "]", ise.getMessage()); + assertEquals("Transport TLS ([xpack.security.transport.ssl.enabled]) is required for license type [" + + license.operationMode().description() + "] when security is enabled", ise.getMessage()); } else { validator.accept(node, state); } From 673db8581ceb1bae47b8d8a24e7f0cc945e3a94c Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Wed, 15 May 2019 15:57:11 -0400 Subject: [PATCH 103/321] Add ChaCha20 TLS ciphers on Java 12+ (#42155) Java 12 added support for the ChaCha20 TLS ciphers, so this change conditionally adds these ciphers to the default ciphers if the JVM is Java 12 or later. --- .../common/ssl/SslConfigurationLoader.java | 24 +++++++++++++++---- .../ssl/SslConfigurationLoaderTests.java | 17 +++++++++++++ .../xpack/core/XPackSettings.java | 20 +++++++++++++++- .../xpack/core/XPackSettingsTests.java | 16 +++++++++++++ 4 files changed, 72 insertions(+), 5 deletions(-) diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index a880090a048ef..e9a1ccad3e950 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.bootstrap.JavaVersion; + import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; import java.nio.file.Path; @@ -66,10 +68,7 @@ public abstract class SslConfigurationLoader { static final List DEFAULT_PROTOCOLS = List.of("TLSv1.3", "TLSv1.2", "TLSv1.1"); - /** - * This list has been created with ordering - */ - static final List DEFAULT_CIPHERS = List.of( + private static final List JDK11_CIPHERS = List.of( "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support @@ -80,6 +79,23 @@ public abstract class SslConfigurationLoader { "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", // AEAD, hardware support "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + + private static final List JDK12_CIPHERS = List.of( + "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support + 
"TLS_CHACHA20_POLY1305_SHA256", // TLSv1.3 cipher has PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", // AEAD, hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + + static final List DEFAULT_CIPHERS = + JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1 ? JDK12_CIPHERS : JDK11_CIPHERS; private static final char[] EMPTY_PASSWORD = new char[0]; private final String settingPrefix; diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java index 20a161b78fd5f..b8648efe49618 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -33,8 +34,10 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class SslConfigurationLoaderTests extends ESTestCase { @@ -217,4 +220,18 @@ public void testLoadKeysFromJKS() { assertThat(keyConfig.getDependentFiles(), containsInAnyOrder(getDataPath("/certs/cert-all/certs.jks"))); assertThat(keyConfig.createKeyManager(), notNullValue()); } + + public void testChaCha20InCiphersOnJdk12Plus() { + assumeTrue("Test is only valid on JDK 12+ JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_CHACHA20_POLY1305_SHA256")); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256")); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256")); + } + + public void testChaCha20NotInCiphersOnPreJdk12() { + assumeTrue("Test is only valid on pre JDK 12 JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, not(hasItem("TLS_CHACHA20_POLY1305_SHA256"))); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, 
not(hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"))); + assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"))); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 92f8915e922f1..848aacd621111 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.xpack.core.security.SecurityField; @@ -117,7 +118,7 @@ private XPackSettings() { * SSL settings. These are the settings that are specifically registered for SSL. Many are private as we do not explicitly use them * but instead parse based on a prefix (eg *.ssl.*) */ - public static final List DEFAULT_CIPHERS = List.of( + private static final List JDK11_CIPHERS = List.of( "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support @@ -129,6 +130,23 @@ private XPackSettings() { "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + private static final List JDK12_CIPHERS = List.of( + "TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256", // TLSv1.3 cipher has PFS, AEAD, hardware support + "TLS_CHACHA20_POLY1305_SHA256", // TLSv1.3 cipher has PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // PFS, AEAD, hardware support + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // PFS, AEAD + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // PFS, hardware support + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // PFS, hardware support + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", // AEAD, hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", // hardware support + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA"); // hardware support + + public static final List DEFAULT_CIPHERS = + JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1 ? 
JDK12_CIPHERS : JDK11_CIPHERS; + /* * Do not allow insecure hashing algorithms to be used for password hashing */ diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java index 924e55be51cb4..9f430819225a4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import javax.crypto.SecretKeyFactory; @@ -14,6 +15,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.not; public class XPackSettingsTests extends ESTestCase { @@ -22,6 +24,20 @@ public void testDefaultSSLCiphers() { assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_RSA_WITH_AES_256_CBC_SHA")); } + public void testChaCha20InCiphersOnJdk12Plus() { + assumeTrue("Test is only valid on JDK 12+ JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) > -1); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_CHACHA20_POLY1305_SHA256")); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256")); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256")); + } + + public void testChaCha20NotInCiphersOnPreJdk12() { + assumeTrue("Test is only valid on pre JDK 12 JVM", JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_CHACHA20_POLY1305_SHA256"))); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"))); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"))); + } + public void testPasswordHashingAlgorithmSettingValidation() { final boolean isPBKDF2Available = isSecretkeyFactoryAlgoAvailable("PBKDF2WithHMACSHA512"); final String pbkdf2Algo = randomFrom("PBKDF2_10000", "PBKDF2"); From 91039bab12d3ef27d6eac9cdc891a3b3ad0c694d Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Wed, 15 May 2019 16:06:03 -0400 Subject: [PATCH 104/321] SQL: Fix issue regarding INTERVAL * number (#42014) Interval * integer number is a valid operation which previously was only supported for foldables (literals) and not when a field was involved. That was because: 1. there was no common type returned for that combination; 2. the `BinaryArithmeticOperation` permitted the multiplication (called by fold()) but the BinaryArithmeticProcessor didn't allow it. Moreover, the error message for invalid arithmetic operations was wrong because of an issue with the overloaded methods of `LoggerMessageFormat.format`.
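Reduced to a hedged, self-contained sketch (plain strings stand in for the SQL `DataType` enum; the actual rule is the `DataTypeConversion.commonType` change in the diff below), the new common-type behaviour is:

```java
// Hedged sketch only: interval combined with an integral number keeps the
// interval type; interval combined with a rational number has no common type.
public class IntervalCommonType {

    static String commonType(String left, String right) {
        if (isInterval(left) && isInteger(right)) {
            return left;
        }
        if (isInteger(left) && isInterval(right)) {
            return right;
        }
        return null; // no common type -> the verifier reports incompatible types
    }

    static boolean isInterval(String type) {
        return type.startsWith("INTERVAL");
    }

    static boolean isInteger(String type) {
        // simplified: the real check covers all of SQL's integer-like types
        return type.equals("INTEGER") || type.equals("LONG") || type.equals("BYTE");
    }

    public static void main(String[] args) {
        System.out.println(commonType("INTERVAL_DAY", "INTEGER"));  // INTERVAL_DAY
        System.out.println(commonType("LONG", "INTERVAL_MONTH"));   // INTERVAL_MONTH
        System.out.println(commonType("DOUBLE", "INTERVAL_MONTH")); // null -> error
    }
}
```

Returning null for the `DOUBLE` case is what produces the "incompatible types" errors asserted in the new verifier tests below.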
Fixes: #41239 Fixes: #41200 --- .../main/resources/datetime-interval.csv-spec | 20 ++++++++++++++++++ .../arithmetic/BinaryArithmeticProcessor.java | 6 +++--- .../DateTimeArithmeticOperation.java | 8 ++++++- .../predicate/operator/arithmetic/Mul.java | 2 +- .../predicate/operator/arithmetic/Sub.java | 4 ++++ .../xpack/sql/type/DataTypeConversion.java | 11 ++++++++++ .../analyzer/VerifierErrorMessagesTests.java | 21 +++++++++++++++++++ .../sql/type/DataTypeConversionTests.java | 4 ++++ 8 files changed, 71 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec index 8d9a65d1b85b6..bfb28775bc3b6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec @@ -182,6 +182,26 @@ SELECT -2 * INTERVAL '1 23:45' DAY TO MINUTES AS result; -3 23:30:00.0 ; +intervalHoursMultiply +SELECT 4 * -INTERVAL '2' HOURS AS result1, -5 * -INTERVAL '3' HOURS AS result2; + result1 | result2 +---------------+-------------- +-0 08:00:00.0 | +0 15:00:00.0 +; + +intervalAndFieldMultiply +schema::languages:byte|result:string +SELECT languages, CAST (languages * INTERVAL '1 10:30' DAY TO MINUTES AS string) AS result FROM test_emp ORDER BY emp_no LIMIT 5; + + languages | result +---------------+--------------------------------------------- +2 | +2 21:00:00.0 +5 | +7 04:30:00.0 +4 | +5 18:00:00.0 +5 | +7 04:30:00.0 +1 | +1 10:30:00.0 +; + dateMinusInterval SELECT CAST('2018-05-13T12:34:56' AS DATETIME) - INTERVAL '2-8' YEAR TO MONTH AS result; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java index b6bfaa4acb63d..5705bb4d85ab4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java @@ -164,7 +164,7 @@ protected Object doProcess(Object left, Object right) { return null; } - if (f == BinaryArithmeticOperation.MUL || f == BinaryArithmeticOperation.DIV || f == BinaryArithmeticOperation.MOD) { + if (f == BinaryArithmeticOperation.DIV || f == BinaryArithmeticOperation.MOD) { if (!(left instanceof Number)) { throw new SqlIllegalArgumentException("A number is required; received {}", left); } @@ -176,8 +176,8 @@ protected Object doProcess(Object left, Object right) { return f.apply(left, right); } - if (f == BinaryArithmeticOperation.ADD || f == BinaryArithmeticOperation.SUB) { - return f.apply(left, right); + if (f == BinaryArithmeticOperation.ADD || f == BinaryArithmeticOperation.SUB || f == BinaryArithmeticOperation.MUL) { + return f.apply(left, right); } // this should not occur diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 5be5e28718459..5b1076592d859 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -43,7 +43,7 @@ protected TypeResolution resolveType() { // 2. 3. 4. intervals if ((DataTypes.isInterval(l) || DataTypes.isInterval(r))) { if (DataTypeConversion.commonType(l, r) == null) { - return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } else { return resolveWithIntervals(); } @@ -54,6 +54,12 @@ protected TypeResolution resolveType() { } protected TypeResolution resolveWithIntervals() { + DataType l = left().dataType(); + DataType r = right().dataType(); + + if (!(r.isDateOrTimeBased() || DataTypes.isInterval(r))|| !(l.isDateOrTimeBased() || DataTypes.isInterval(l))) { + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + } return TypeResolution.TYPE_RESOLVED; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java index 7a09bbedebfa3..e3fa7ac1031f7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java @@ -47,7 +47,7 @@ protected TypeResolution resolveType() { return TypeResolution.TYPE_RESOLVED; } - return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java index ee3ca6aa6773b..a47b9cc973122 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java @@ -34,6 +34,10 @@ protected Sub replaceChildren(Expression newLeft, Expression newRight) { @Override protected TypeResolution resolveWithIntervals() { + TypeResolution resolution = super.resolveWithIntervals(); + if (resolution.unresolved()) { + return resolution; + } if ((right().dataType().isDateOrTimeBased()) && DataTypes.isInterval(left().dataType())) { return new TypeResolution(format(null, "Cannot subtract a {}[{}] from an interval[{}]; do you mean the reverse?", right().dataType().typeName, right().source().text(), left().source().text())); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index 40a03e26eb0ef..5fd1867aeb27a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -121,6 +121,17 @@ public static DataType commonType(DataType left, DataType right) { return right; } } + // Interval * integer is a valid operation 
+ if (DataTypes.isInterval(left)) { + if (right.isInteger()) { + return left; + } + } + if (DataTypes.isInterval(right)) { + if (left.isInteger()) { + return right; + } + } if (DataTypes.isInterval(left)) { // intervals widening if (DataTypes.isInterval(right)) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 609e6a52c3e0f..f10b1a402708f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -241,6 +241,27 @@ public void testSubtractFromInterval() { error("SELECT INTERVAL 1 MONTH - CAST('12:23:56.789' AS TIME)")); } + public void testAddIntervalAndNumberNotAllowed() { + assertEquals("1:8: [+] has arguments with incompatible types [INTERVAL_DAY] and [INTEGER]", + error("SELECT INTERVAL 1 DAY + 100")); + assertEquals("1:8: [+] has arguments with incompatible types [INTEGER] and [INTERVAL_DAY]", + error("SELECT 100 + INTERVAL 1 DAY")); + } + + public void testSubtractIntervalAndNumberNotAllowed() { + assertEquals("1:8: [-] has arguments with incompatible types [INTERVAL_MINUTE] and [DOUBLE]", + error("SELECT INTERVAL 10 MINUTE - 100.0")); + assertEquals("1:8: [-] has arguments with incompatible types [DOUBLE] and [INTERVAL_MINUTE]", + error("SELECT 100.0 - INTERVAL 10 MINUTE")); + } + + public void testMultiplyIntervalWithDecimalNotAllowed() { + assertEquals("1:8: [*] has arguments with incompatible types [INTERVAL_MONTH] and [DOUBLE]", + error("SELECT INTERVAL 1 MONTH * 1.234")); + assertEquals("1:8: [*] has arguments with incompatible types [DOUBLE] and [INTERVAL_MONTH]", + error("SELECT 1.234 * INTERVAL 1 MONTH")); + } + public void testMultipleColumns() { assertEquals("1:43: Unknown column [xxx]\nline 1:8: Unknown column [xxx]", error("SELECT xxx FROM test GROUP BY DAY_oF_YEAR(xxx)")); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 447c820c8e421..7ca4d0058325f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -628,6 +628,10 @@ public void testCommonType() { assertEquals(FLOAT, commonType(FLOAT, INTEGER)); assertEquals(DOUBLE, commonType(DOUBLE, FLOAT)); + // numeric and intervals + assertEquals(INTERVAL_YEAR_TO_MONTH, commonType(INTERVAL_YEAR_TO_MONTH, LONG)); + assertEquals(INTERVAL_HOUR_TO_MINUTE, commonType(INTEGER, INTERVAL_HOUR_TO_MINUTE)); + // dates/datetimes and intervals assertEquals(DATETIME, commonType(DATE, DATETIME)); assertEquals(DATETIME, commonType(DATETIME, DATE)); From 230ae18edb9310ac3dfef91490b459ba2a73918f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 15 May 2019 18:13:04 -0400 Subject: [PATCH 105/321] Don't verify evictions in testFilterCacheStats (#42091) If a background merge and refresh happens after a search but before a stats query, then evictions will be non-zero. 
Closes #32506 --- .../java/org/elasticsearch/indices/stats/IndexStatsIT.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a3697af50b0b6..59e7c21a3e6e8 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -62,7 +62,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.ArrayList; @@ -1008,8 +1007,6 @@ private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) { assertEquals(total, shardTotal); } - @TestLogging("_root:DEBUG") // this fails at a very low rate on CI: https://github.com/elastic/elasticsearch/issues/32506 - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); assertAcked(prepareCreate("index").setSettings(settings).get()); @@ -1034,7 +1031,6 @@ public void testFilterCacheStats() throws Exception { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), equalTo(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); @@ -1045,7 +1041,6 @@ public void testFilterCacheStats() throws Exception { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), greaterThan(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); From bc0b0f55a26286413e1729e15def482775ec1574 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 09:48:57 -0400 Subject: [PATCH 106/321] Hide bwc build output on success (#42102) Previously we used LoggedExec for running the internal bwc builds. However, this had bad performance implications as all the output was buffered into memory, thus we changed back to normal Exec. This commit adds a `spoolOutput` setting to LoggedExec which can be used for commands with large amounts of output, and switches the bwc builds to use this flag. 
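To make the trade-off concrete, the following is a hedged, self-contained demonstration (not part of the patch; the file name and output sizes are arbitrary) of the two output strategies `LoggedExec` now chooses between:

```java
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

public class SpoolDemo {
    public static void main(String[] args) throws IOException {
        byte[] chunk = "a line of build output\n".getBytes(StandardCharsets.UTF_8);

        // Strategy 1: buffer in memory -- cheap to read back on failure, but the
        // entire command output lives on the heap, which hurts for large builds.
        ByteArrayOutputStream inMemory = new ByteArrayOutputStream();

        // Strategy 2: spool to a file -- the heap only holds a small buffer, at
        // the cost of filesystem writes and a re-read if the command fails.
        Path spoolFile = Files.createTempFile("spooled-output", ".log");
        OutputStream onDisk = new BufferedOutputStream(Files.newOutputStream(spoolFile));

        for (int i = 0; i < 10_000; i++) {
            inMemory.write(chunk);
            onDisk.write(chunk);
        }
        onDisk.close();

        System.out.println("heap bytes held: " + inMemory.size());
        System.out.println("spooled bytes:   " + Files.size(spoolFile) + " in " + spoolFile);
    }
}
```

Spooling trades heap for disk and only pays the re-read cost when a command fails, which is why it is an opt-in per task rather than the default.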
--- .../gradle/LazyFileOutputStream.java | 67 +++++++++++++++++ .../org/elasticsearch/gradle/LoggedExec.java | 72 +++++++++++++------ distribution/bwc/build.gradle | 3 +- 3 files changed, 118 insertions(+), 24 deletions(-) create mode 100644 buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java new file mode 100644 index 0000000000000..d3101868e84b6 --- /dev/null +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +/** + * An outputstream to a File that is lazily opened on the first write. + */ +class LazyFileOutputStream extends OutputStream { + private OutputStream delegate; + + LazyFileOutputStream(File file) { + // use an initial dummy delegate to avoid doing a conditional on every write + this.delegate = new OutputStream() { + private void bootstrap() throws IOException { + file.getParentFile().mkdirs(); + delegate = new FileOutputStream(file); + } + @Override + public void write(int b) throws IOException { + bootstrap(); + delegate.write(b); + } + @Override + public void write(byte b[], int off, int len) throws IOException { + bootstrap(); + delegate.write(b, off, len); + } + }; + } + + @Override + public void write(int b) throws IOException { + delegate.write(b); + } + + @Override + public void write(byte b[], int off, int len) throws IOException { + delegate.write(b, off, len); + } + + @Override + public void close() throws IOException { + delegate.close(); + } +} diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index 8dd59170039eb..a3f8757293204 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -3,14 +3,22 @@ import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; +import org.gradle.api.logging.Logger; import org.gradle.api.tasks.Exec; +import org.gradle.api.tasks.Internal; import org.gradle.process.BaseExecSpec; import org.gradle.process.ExecResult; import org.gradle.process.ExecSpec; import org.gradle.process.JavaExecSpec; import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; import java.io.UnsupportedEncodingException; +import 
java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -19,35 +27,53 @@ @SuppressWarnings("unchecked") public class LoggedExec extends Exec { + private Consumer outputLogger; + public LoggedExec() { - ByteArrayOutputStream output = new ByteArrayOutputStream(); - ByteArrayOutputStream error = new ByteArrayOutputStream(); + if (getLogger().isInfoEnabled() == false) { - setStandardOutput(output); - setErrorOutput(error); setIgnoreExitValue(true); - doLast((unused) -> { - if (getExecResult().getExitValue() != 0) { - try { - getLogger().error("Standard output:"); - getLogger().error(output.toString("UTF-8")); - getLogger().error("Standard error:"); - getLogger().error(error.toString("UTF-8")); - } catch (UnsupportedEncodingException e) { - throw new GradleException("Failed to read exec output", e); - } - throw new GradleException( - String.format( - "Process '%s %s' finished with non-zero exit value %d", - getExecutable(), - getArgs(), - getExecResult().getExitValue() - ) - ); + setSpoolOutput(false); + doLast(task -> { + if (getExecResult().getExitValue() != 0) { + try { + getLogger().error("Output for " + getExecutable() + ":"); + outputLogger.accept(getLogger()); + } catch (Exception e) { + throw new GradleException("Failed to read exec output", e); } + throw new GradleException( + String.format( + "Process '%s %s' finished with non-zero exit value %d", + getExecutable(), + getArgs(), + getExecResult().getExitValue() + ) + ); + } + }); + } + } + + @Internal + public void setSpoolOutput(boolean spoolOutput) { + final OutputStream out; + if (spoolOutput) { + File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName()); + out = new LazyFileOutputStream(spoolFile); + outputLogger = logger -> { + try { + Files.lines(spoolFile.toPath()).forEach(logger::error); + } catch (IOException e) { + throw new RuntimeException("could not log", e); } - ); + }; + } else { + out = new ByteArrayOutputStream(); + outputLogger = logger -> logger.error(((ByteArrayOutputStream) getStandardOutput()).toString(StandardCharsets.UTF_8)); } + setStandardOutput(out); + setErrorOutput(out); } public static ExecResult exec(Project project, Action action) { diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 8285d8dae2b0d..87644fb7f6785 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -121,8 +121,9 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased } Closure createRunBwcGradleTask = { name, extraConfig -> - return tasks.create(name: "$name", type: Exec) { + return tasks.create(name: "$name", type: LoggedExec) { dependsOn checkoutBwcBranch, writeBuildMetadata + spoolOutput = true workingDir = checkoutDir doFirst { // Execution time so that the checkouts are available From f16209ada7caf29be6caf12319a4c24798f7ca7a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 09:52:13 -0400 Subject: [PATCH 107/321] Deprecate the native realm migration tool (#42142) The migrate tool was added when the native realm was created, to aid users in converting from file realms that were per node, into the cluster managed native realm. While this tool was useful at the time, users should now be using the native realm directly. This commit deprecates the tool, to be removed in a followup for 8.0. 
--- docs/reference/commands/migrate-tool.asciidoc | 2 ++ .../authc/esnative/ESNativeRealmMigrateTool.java | 1 + 2 files changed, 3 insertions(+) diff --git a/docs/reference/commands/migrate-tool.asciidoc b/docs/reference/commands/migrate-tool.asciidoc index a1903ac69dacf..2c2f4abf4333b 100644 --- a/docs/reference/commands/migrate-tool.asciidoc +++ b/docs/reference/commands/migrate-tool.asciidoc @@ -3,6 +3,8 @@ [[migrate-tool]] == elasticsearch-migrate +deprecated:[7.2.0, "This tool is deprecated. Use the native realm directly."] + The `elasticsearch-migrate` command migrates existing file-based users and roles to the native realm. From 5.0 onward, you should use the `native` realm to manage roles and local users. diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java index 6368f4a7510c9..0fbe54d7c1066 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java @@ -133,6 +133,7 @@ protected void printAdditionalHelp(Terminal terminal) { // Visible for testing @Override public void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println("Warning: The migrate tool is deprecated. Use the native realm directly instead of file realms."); terminal.println("starting migration of users and roles..."); importUsers(terminal, env, options); importRoles(terminal, env, options); From 06cfc7ad43041b607cd2236c013b86b4db47b21b Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 16 May 2019 10:10:23 -0400 Subject: [PATCH 108/321] [DOCS] add a warning about bypassing PUT API's, update example responses (#42062) Configurations are stored in the .data-frame-internal-1 index, but users should not add configurations directly to that index, because the PUT API also stores additional information used to enable access control. This adds a warning against allowing access to the internal index.
--- .../apis/get-transform-stats.asciidoc | 36 +++++++++++-------- .../data-frames/apis/get-transform.asciidoc | 21 ++++++++--- .../data-frames/apis/put-transform.asciidoc | 14 +++++--- 3 files changed, 48 insertions(+), 23 deletions(-) diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index 85e5001b13a9a..09c383f249488 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -65,27 +65,35 @@ The API returns the following results: { "id" : "ecommerce_transform", "state" : { + "task_state" : "started", "indexer_state" : "started", - "task_state": "started", - "current_position" : { - "customer_id" : "9" - }, - "generation" : 1 + "checkpoint" : 1, + "progress" : { + "total_docs" : 1220, + "docs_remaining" : 0, + "percent_complete" : 100.0 + } }, "stats" : { - "pages_processed" : 0, - "documents_processed" : 0, - "documents_indexed" : 0, - "trigger_count" : 0, - "index_time_in_ms" : 0, - "index_total" : 0, + "pages_processed" : 2, + "documents_processed" : 1220, + "documents_indexed" : 13, + "trigger_count" : 1, + "index_time_in_ms" : 19, + "index_total" : 1, "index_failures" : 0, - "search_time_in_ms" : 0, - "search_total" : 0, + "search_time_in_ms" : 52, + "search_total" : 2, "search_failures" : 0 + }, + "checkpointing" : { + "current" : { + "timestamp_millis" : 1557474786393 + }, + "operations_behind" : 0 } } ] } ---- -// TESTRESPONSE \ No newline at end of file +// TESTRESPONSE diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index 85e56aa21cdd1..e2b5c5eccb7da 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -75,10 +75,20 @@ The API returns the following results: "transforms" : [ { "id" : "ecommerce_transform", - "source" : "kibana_sample_data_ecommerce", - "dest" : "kibana_sample_data_ecommerce_transform", - "query" : { - "match_all" : { } + "source" : { + "index" : [ + "kibana_sample_data_ecommerce" + ], + "query" : { + "term" : { + "geoip.continent_name" : { + "value" : "Asia" + } + } + } + }, + "dest" : { + "index" : "kibana_sample_data_ecommerce_transform" }, "pivot" : { "group_by" : { @@ -95,7 +105,8 @@ The API returns the following results: } } } - } + }, + "description" : "Maximum priced ecommerce data by customer_id in Asia" } ] } diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 222d93dfe4256..f452c38ab4c94 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -15,7 +15,13 @@ Instantiates a {dataframe-transform}. `PUT _data_frame/transforms/` -//===== Description +===== Description + +IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. + Do not put a {dataframe-transform} directly into any + `.data-frame-internal*` indices using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users any + privileges on `.data-frame-internal*` indices. ==== Path Parameters @@ -27,12 +33,12 @@ Instantiates a {dataframe-transform}. ==== Request Body -`source`:: (object) The source configuration, consisting of `index` and optionally +`source` (required):: (object) The source configuration, consisting of `index` and optionally a `query`. 
-`dest`:: (object) The destination configuration, consisting of `index`. +`dest` (required):: (object) The destination configuration, consisting of `index`. -`pivot`:: Defines the pivot function `group by` fields and the aggregation to +`pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to reduce the data. `description`:: Optional free text description of the data frame transform From 178ebd73c8706fe8b533265acc8b3b6a32defca2 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 16 May 2019 13:11:23 -0400 Subject: [PATCH 109/321] [ML] Improve message misformation error in file structure finder (#42175) This change replaces the extremely unfriendly message "Number of messages analyzed must be positive" in the case where the sample lines were incorrectly grouped into just one message to an error that more helpfully explains the likely root cause of the problem. --- .../TextLogFileStructureFinder.java | 6 ++++++ .../TextLogFileStructureFinderTests.java | 21 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index b476e3e465463..36e5e91b4326b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -76,6 +76,12 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex } // Don't add the last message, as it might be partial and mess up subsequent pattern finding + if (sampleMessages.isEmpty()) { + throw new IllegalArgumentException("Failed to create more than one message from the sample lines provided. (The " + + "last is discarded in case the sample is incomplete.) 
+                + "last is discarded in case the sample is incomplete.) If your sample does contain multiple messages the "
+                + "problem is probably that the primary timestamp format has been incorrectly detected, so try overriding it.");
+        }
+
         FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.SEMI_STRUCTURED_TEXT)
             .setCharset(charsetName)
             .setHasByteOrderMarker(hasByteOrderMarker)
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java
index 7ed5518c65077..6cf4d61cf176c 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java
@@ -232,6 +232,27 @@ public void testCreateConfigsGivenElasticsearchLogAndImpossibleGrokPatternOverri
             "\\[%{JAVACLASS:class} *\\] %{JAVALOGMESSAGE:message}] does not match sample messages", e.getMessage());
     }
 
+    public void testErrorOnIncorrectMessageFormation() {
+
+        // This sample causes problems because the (very weird) primary timestamp format
+        // is not detected but a secondary format that only occurs in one line is detected
+        String sample = "Day 21 Month 1 Year 2019 11:04 INFO [localhost] - starting\n" +
+            "Day 21 Month 1 Year 2019 11:04 INFO [localhost] - startup date [Mon Jan 21 11:04:19 CET 2019]\n" +
+            "Day 21 Month 1 Year 2019 11:04 DEBUG [localhost] - details\n" +
+            "Day 21 Month 1 Year 2019 11:04 DEBUG [localhost] - more details\n" +
+            "Day 21 Month 1 Year 2019 11:04 WARN [localhost] - something went wrong\n";
+
+        String charset = randomFrom(POSSIBLE_CHARSETS);
+        Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, FileStructureOverrides.EMPTY_OVERRIDES,
+                NOOP_TIMEOUT_CHECKER));
+
+        assertEquals("Failed to create more than one message from the sample lines provided. (The last is discarded in " +
+            "case the sample is incomplete.) If your sample does contain multiple messages the problem is probably that " +
+            "the primary timestamp format has been incorrectly detected, so try overriding it.", e.getMessage());
+    }
+
     public void testCreateMultiLineMessageStartRegexGivenNoPrefaces() {
         for (TimestampFormatFinder.CandidateTimestampFormat candidateTimestampFormat : TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS) {
             String simpleDateRegex = candidateTimestampFormat.simplePattern.pattern();

From f3dbfdb444d50f6034c994b3bb46be82ea6556c6 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Thu, 16 May 2019 13:36:09 -0400
Subject: [PATCH 110/321] Clarify rolling upgrade fallback to restart upgrade
 (#42161)

Adds a note that restarting half or more of the master-eligible nodes means
you're no longer doing a rolling upgrade, and that you may need to upgrade
all of the remaining old nodes before the cluster returns to health.
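To make the majority rule concrete: with N master-eligible nodes, a master can
only be elected while floor(N / 2) + 1 of them remain available, so strictly
fewer than half may be stopped at any one time. The stand-alone Java sketch
below (illustrative arithmetic only, not Elasticsearch code, and all names are
hypothetical) shows why stopping one node out of three is safe while stopping
two, or even two out of four, is not:

[source,java]
----
// Sketch of the majority rule behind the warning above (not Elasticsearch code).
public class MasterQuorumSketch {

    // With N master-eligible nodes, electing a master needs a majority of them:
    // floor(N / 2) + 1, computed here with integer division.
    static boolean canElectMaster(int masterEligible, int stopped) {
        int quorum = masterEligible / 2 + 1;
        return masterEligible - stopped >= quorum;
    }

    public static void main(String[] args) {
        System.out.println(canElectMaster(3, 1)); // true: 2 of 3 left, quorum is 2
        System.out.println(canElectMaster(3, 2)); // false: 1 of 3 left, below quorum
        System.out.println(canElectMaster(4, 2)); // false: losing exactly half is already too many
    }
}
----

Once the quorum is lost the remaining nodes cannot elect a master, which is why
the fallback is to bring the stopped nodes back upgraded, as in a full cluster
restart.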
---
 .../upgrade/rolling_upgrade.asciidoc | 27 ++++++++++++-------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc
index 041b184570ab6..2cf1061e67ba7 100644
--- a/docs/reference/upgrade/rolling_upgrade.asciidoc
+++ b/docs/reference/upgrade/rolling_upgrade.asciidoc
@@ -168,20 +168,29 @@ include::open-ml.asciidoc[]
 
 During a rolling upgrade, the cluster continues to operate normally. However,
 any new functionality is disabled or operates in a backward compatible mode
-until all nodes in the cluster are upgraded. New functionality
-becomes operational once the upgrade is complete and all nodes are running the
-new version. Once that has happened, there's no way to return to operating
-in a backward compatible mode. Nodes running the previous major version will
-not be allowed to join the fully-updated cluster.
+until all nodes in the cluster are upgraded. New functionality becomes
+operational once the upgrade is complete and all nodes are running the new
+version. Once that has happened, there's no way to return to operating in a
+backward compatible mode. Nodes running the previous major version will not be
+allowed to join the fully-updated cluster.
 
 In the unlikely case of a network malfunction during the upgrade process that
-isolates all remaining old nodes from the cluster, you must take the
-old nodes offline and upgrade them to enable them to join the cluster.
+isolates all remaining old nodes from the cluster, you must take the old nodes
+offline and upgrade them to enable them to join the cluster.
+
+If you stop half or more of the master-eligible nodes all at once during the
+upgrade then the cluster will become unavailable, meaning that the upgrade is
+no longer a _rolling_ upgrade. If this happens, you should upgrade and restart
+all of the stopped master-eligible nodes to allow the cluster to form again, as
+if performing a <<restart-upgrade,full cluster restart upgrade>>. It may also
+be necessary to upgrade all of the remaining old nodes before they can join the
+cluster after it re-forms.
 
 Similarly, if you run a testing/development environment with only one master
 node, the master node should be upgraded last. Restarting a single master node
 forces the cluster to be reformed. The new cluster will initially only have the
 upgraded master node and will thus reject the older nodes when they re-join the
-cluster. Nodes that have already been upgraded will successfully re-join the
-upgraded master.
+cluster. Nodes that have already been upgraded will successfully re-join the
+upgraded master.
+
 ====================================================

From 41739a1f4b6617001f1c6d1cca0f83016d1b5b46 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Thu, 16 May 2019 15:35:27 -0400
Subject: [PATCH 111/321] Use local outputstream reference (#42180)

This commit fixes the logging in LoggedExec when an in-memory buffer is
used: the output logger now reads from a local reference instead of casting
the result of getStandardOutput() on the Exec task. The cast broke because
Gradle internally wraps the stream in a TeeOutputStream.
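The failure mode is easiest to see in isolation. Below is a minimal,
self-contained Java sketch (hypothetical names throughout; `TeeStream` stands
in for Gradle's internal `TeeOutputStream` wrapper) of why casting the getter's
return value fails once the framework has wrapped the stream, while reading
through the locally captured reference still works:

[source,java]
----
import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public class LocalReferenceSketch {

    // Stand-in for Gradle's TeeOutputStream: forwards writes to the delegate,
    // but is not itself a ByteArrayOutputStream.
    static class TeeStream extends FilterOutputStream {
        TeeStream(OutputStream delegate) {
            super(delegate);
        }
    }

    private OutputStream standardOutput;

    void setStandardOutput(OutputStream out) {
        this.standardOutput = new TeeStream(out); // the framework wraps what we hand it
    }

    OutputStream getStandardOutput() {
        return standardOutput;
    }

    public static void main(String[] args) throws Exception {
        LocalReferenceSketch exec = new LocalReferenceSketch();
        ByteArrayOutputStream out = new ByteArrayOutputStream(); // the local reference
        exec.setStandardOutput(out);
        exec.getStandardOutput().write("hello".getBytes(StandardCharsets.UTF_8));

        try {
            // Broken: the getter returns the TeeStream wrapper, not our buffer.
            System.out.println(((ByteArrayOutputStream) exec.getStandardOutput()).toString("UTF-8"));
        } catch (ClassCastException e) {
            System.out.println("cast failed: " + e);
        }

        // Fixed: read from the local reference created above.
        System.out.println(out.toString("UTF-8")); // prints "hello"
    }
}
----

The one-line change in the diff below applies the same idea: the logging lambda
captures the local `out` variable instead of calling `getStandardOutput()`
again.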
--- .../minimumRuntime/org/elasticsearch/gradle/LoggedExec.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index a3f8757293204..233431ff3c497 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -70,7 +70,7 @@ public void setSpoolOutput(boolean spoolOutput) { }; } else { out = new ByteArrayOutputStream(); - outputLogger = logger -> logger.error(((ByteArrayOutputStream) getStandardOutput()).toString(StandardCharsets.UTF_8)); + outputLogger = logger -> logger.error(((ByteArrayOutputStream) out).toString(StandardCharsets.UTF_8)); } setStandardOutput(out); setErrorOutput(out); From 449679421020405ffa54ff59a982e32dddf2102d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 16 May 2019 15:38:15 -0400 Subject: [PATCH 112/321] Protect logged exec spooling from no output (#42177) This commit adds a guard around reading the spooled LoggedExec output. It is possible the exec command did not output anything, and failed, which would trigger a failure to read the output file. --- .../minimumRuntime/org/elasticsearch/gradle/LoggedExec.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java index 233431ff3c497..c71b7ba183562 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java @@ -63,7 +63,10 @@ public void setSpoolOutput(boolean spoolOutput) { out = new LazyFileOutputStream(spoolFile); outputLogger = logger -> { try { - Files.lines(spoolFile.toPath()).forEach(logger::error); + // the file may not exist if the command never output anything + if (Files.exists(spoolFile.toPath())) { + Files.lines(spoolFile.toPath()).forEach(logger::error); + } } catch (IOException e) { throw new RuntimeException("could not log", e); } From 6cea3180331669198f8db4ef699a46d7e0693a39 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 16 May 2019 21:24:46 -0400 Subject: [PATCH 113/321] Wildfly tests use rest high level client (#42186) This change updates the Wildfly qa tests to make use of the RestHighLevelClient in place of the transport client to pave the way for the future removal of the transport client. --- qa/wildfly/build.gradle | 9 +++--- ...java => RestHighLevelClientActivator.java} | 4 +-- ... RestHighLevelClientEmployeeResource.java} | 19 ++++++++----- ....java => RestHighLevelClientProducer.java} | 28 ++++++------------- ... 
=> RestHighLevelJacksonJsonProvider.java} | 2 +- .../org/elasticsearch/wildfly/WildflyIT.java | 2 +- 6 files changed, 28 insertions(+), 36 deletions(-) rename qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/{TransportClientActivator.java => RestHighLevelClientActivator.java} (87%) rename qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/{TransportClientEmployeeResource.java => RestHighLevelClientEmployeeResource.java} (84%) rename qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/{TransportClientProducer.java => RestHighLevelClientProducer.java} (58%) rename qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/{TransportJacksonJsonProvider.java => RestHighLevelJacksonJsonProvider.java} (92%) diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index dcbf5253bb085..bcb55079b8269 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -70,7 +70,7 @@ dependencies { compile "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" compile "org.apache.logging.log4j:log4j-core:${versions.log4j}" - compile project(path: ':client:transport', configuration: 'runtime') + compile project(path: ':client:rest-high-level') wildfly "org.jboss:wildfly:${wildflyVersion}@zip" testCompile "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}" } @@ -93,8 +93,7 @@ task writeElasticsearchProperties { final File elasticsearchProperties = file("${wildflyInstall}/standalone/configuration/elasticsearch.properties") elasticsearchProperties.write( [ - "transport.uri=${-> integTest.getNodes().get(0).transportUri()}", - "cluster.name=${-> integTest.getNodes().get(0).clusterName}" + "http.uri=${-> integTest.getNodes().get(0).httpUri()}" ].join("\n")) } } @@ -167,7 +166,7 @@ task startWildfly { } } -task configureTransportClient(type: LoggedExec) { +task configureClient(type: LoggedExec) { dependsOn startWildfly // we skip these tests on Windows so we do not need to worry about compatibility here commandLine "${wildflyInstall}/bin/jboss-cli.sh", @@ -182,7 +181,7 @@ task stopWildfly(type: LoggedExec) { } if (!Os.isFamily(Os.FAMILY_WINDOWS)) { - integTestRunner.dependsOn(configureTransportClient) + integTestRunner.dependsOn(configureClient) final TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { @Override void afterExecute(final Task task, final TaskState state) { diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientActivator.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java similarity index 87% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientActivator.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java index 881b263f35b97..c860f9e5e1bf7 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientActivator.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientActivator.java @@ -26,11 +26,11 @@ import java.util.Set; @ApplicationPath("/transport") -public class TransportClientActivator extends Application { +public class RestHighLevelClientActivator extends Application { @Override public Set> getClasses() { - return Collections.singleton(TransportClientEmployeeResource.class); + return Collections.singleton(RestHighLevelClientEmployeeResource.class); } } diff --git 
a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientEmployeeResource.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java similarity index 84% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientEmployeeResource.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java index 4008bf8801a55..d99810a9638cc 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientEmployeeResource.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java @@ -19,9 +19,12 @@ package org.elasticsearch.wildfly.transport; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.wildfly.model.Employee; @@ -33,7 +36,6 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; - import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -44,17 +46,17 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @Path("/employees") -public class TransportClientEmployeeResource { +public class RestHighLevelClientEmployeeResource { @Inject - private TransportClient client; + private RestHighLevelClient client; @GET @Path("/{id}") @Produces(MediaType.APPLICATION_JSON) - public Response getEmployeeById(final @PathParam("id") Long id) { + public Response getEmployeeById(final @PathParam("id") Long id) throws IOException { Objects.requireNonNull(id); - final GetResponse response = client.prepareGet("megacorp", "employee", Long.toString(id)).get(); + final GetResponse response = client.get(new GetRequest("megacorp", Long.toString(id)), RequestOptions.DEFAULT); if (response.isExists()) { final Map source = response.getSource(); final Employee employee = new Employee(); @@ -94,7 +96,10 @@ public Response putEmployeeById(final @PathParam("id") Long id, final Employee e } } builder.endObject(); - final IndexResponse response = client.prepareIndex("megacorp", "employee", Long.toString(id)).setSource(builder).get(); + final IndexRequest request = new IndexRequest("megacorp"); + request.id(Long.toString(id)); + request.source(builder); + final IndexResponse response = client.index(request, RequestOptions.DEFAULT); if (response.status().getStatus() == 201) { return Response.created(new URI("/employees/" + id)).build(); } else { diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientProducer.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java similarity index 58% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientProducer.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java index 7c234bce6cdb7..5d924192342ef 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportClientProducer.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java @@ -19,46 +19,34 @@ 
package org.elasticsearch.wildfly.transport; -import org.elasticsearch.client.transport.TransportClient; +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.transport.client.PreBuiltTransportClient; import javax.enterprise.inject.Produces; - import java.io.IOException; import java.io.InputStream; -import java.net.InetAddress; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Collections; import java.util.Properties; @SuppressWarnings("unused") -public final class TransportClientProducer { +public final class RestHighLevelClientProducer { @Produces - public TransportClient createTransportClient() throws IOException { + public RestHighLevelClient createRestHighLevelClient() throws IOException { final String elasticsearchProperties = System.getProperty("elasticsearch.properties"); final Properties properties = new Properties(); - final String transportUri; - final String clusterName; + final String httpUri; try (InputStream is = Files.newInputStream(getPath(elasticsearchProperties))) { properties.load(is); - transportUri = properties.getProperty("transport.uri"); - clusterName = properties.getProperty("cluster.name"); + httpUri = properties.getProperty("http.uri"); } - final int lastColon = transportUri.lastIndexOf(':'); - final String host = transportUri.substring(0, lastColon); - final int port = Integer.parseInt(transportUri.substring(lastColon + 1)); - final Settings settings = Settings.builder().put("cluster.name", clusterName).build(); - final TransportClient transportClient = new PreBuiltTransportClient(settings, Collections.emptyList()); - transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName(host), port)); - return transportClient; + return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri))); } @SuppressForbidden(reason = "get path not configured in environment") diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportJacksonJsonProvider.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java similarity index 92% rename from qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportJacksonJsonProvider.java rename to qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java index 07585780c0665..50568790ca064 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/TransportJacksonJsonProvider.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java @@ -24,5 +24,5 @@ import javax.ws.rs.ext.Provider; @Provider -public class TransportJacksonJsonProvider extends ResteasyJackson2Provider { +public class RestHighLevelJacksonJsonProvider extends ResteasyJackson2Provider { } diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java index 9aebffdc4ce3f..28e11f021a1c7 100644 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java @@ -53,7 +53,7 @@ @TestRuleLimitSysouts.Limit(bytes = 14000) public class WildflyIT extends LuceneTestCase { - Logger 
logger = Logger.getLogger(WildflyIT.class); + private Logger logger = Logger.getLogger(WildflyIT.class); public void testTransportClient() throws URISyntaxException, IOException { try (CloseableHttpClient client = HttpClientBuilder.create().build()) { From f45a4731bb31962361eb8992aebcdab02b37a636 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 17 May 2019 14:49:05 -0400 Subject: [PATCH 114/321] Remove the migrate tool (#42174) This commit removes the deprecated migrate tool which was used to migrate users from the file realm to native realm when the native realm was first created. --- docs/reference/commands/index.asciidoc | 2 - docs/reference/commands/migrate-tool.asciidoc | 112 ----- .../migration/migrate_8_0/security.asciidoc | 8 + .../packaging/util/Archives.java | 1 - .../packaging/util/Packages.java | 1 - .../src/main/bin/elasticsearch-migrate | 10 - .../src/main/bin/elasticsearch-migrate.bat | 19 - .../esnative/ESNativeRealmMigrateTool.java | 399 ------------------ .../esnative/ESNativeMigrateToolTests.java | 175 -------- .../ESNativeRealmMigrateToolTests.java | 149 ------- x-pack/qa/security-migrate-tests/build.gradle | 43 -- x-pack/qa/security-migrate-tests/roles.yml | 22 - .../xpack/security/MigrateToolIT.java | 130 ------ .../xpack/security/MigrateToolTestCase.java | 166 -------- .../test/resources/packaging/utils/xpack.bash | 1 - 15 files changed, 8 insertions(+), 1230 deletions(-) delete mode 100644 docs/reference/commands/migrate-tool.asciidoc delete mode 100755 x-pack/plugin/security/src/main/bin/elasticsearch-migrate delete mode 100644 x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java delete mode 100644 x-pack/qa/security-migrate-tests/build.gradle delete mode 100644 x-pack/qa/security-migrate-tests/roles.yml delete mode 100644 x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java delete mode 100644 x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index a13ea58c27d3e..e778366aa58b9 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -9,7 +9,6 @@ tasks from the command line: * <> * <> -* <> * <> * <> * <> @@ -21,7 +20,6 @@ tasks from the command line: include::certgen.asciidoc[] include::certutil.asciidoc[] -include::migrate-tool.asciidoc[] include::node-tool.asciidoc[] include::saml-metadata.asciidoc[] include::setup-passwords.asciidoc[] diff --git a/docs/reference/commands/migrate-tool.asciidoc b/docs/reference/commands/migrate-tool.asciidoc deleted file mode 100644 index 2c2f4abf4333b..0000000000000 --- a/docs/reference/commands/migrate-tool.asciidoc +++ /dev/null @@ -1,112 +0,0 @@ -[role="xpack"] -[testenv="gold+"] -[[migrate-tool]] -== elasticsearch-migrate - -deprecated:[7.2.0, "This tool is deprecated. Use the native realm directly."] - -The `elasticsearch-migrate` command migrates existing file-based users and roles -to the native realm. 
From 5.0 onward, you should use the `native` realm to -manage roles and local users. - - -[float] -=== Synopsis - -[source,shell] --------------------------------------------------- -bin/elasticsearch-migrate -(native (-U, --url ) -[-h, --help] [-E ] -[-n, --users ] [-r, --roles ] -[-u, --username ] [-p, --password ] -[-s, --silent] [-v, --verbose]) --------------------------------------------------- - -[float] -=== Description - -NOTE: When migrating from Shield 2.x, the `elasticsearch-migrate` tool should be -run prior to upgrading to ensure all roles can be migrated as some may be in a -deprecated format that {xpack} cannot read. The `migrate` tool is available in -Shield 2.4.0 and higher. - -The `elasticsearch-migrate` tool loads the existing file-based users and roles -and calls the user and roles APIs to add them to the native realm. You can -migrate all users and roles, or specify the ones you want to migrate. Users and -roles that already exist in the `native` realm are not replaced or -overridden. If the names you specify with the `--users` and `--roles` options -don't exist in the `file` realm, they are skipped. - -[float] -[[migrate-tool-options]] -=== Parameters -The `native` subcommand supports the following options: - -`-E `:: -Configures a setting. - -`-h, --help`:: -Returns all of the command parameters. - -`-n`, `--users`:: -Comma-separated list of the users that you want to migrate. If this parameter is -not specified, all users are migrated. - -`-p`, `--password`:: -Password to use for authentication with {es}. -//TBD: What is the default if this isn't specified? - -`-r`, `--roles`:: -Comma-separated list of the roles that you want to migrate. If this parameter is -not specified, all roles are migrated. - -`-s, --silent`:: Shows minimal output. - -`-U`, `--url`:: -Endpoint URL of the {es} cluster to which you want to migrate the -file-based users and roles. This parameter is required. - -`-u`, `--username`:: -Username to use for authentication with {es}. -//TBD: What is the default if this isn't specified? - -`-v, --verbose`:: Shows verbose output. - -[float] -=== Examples - -Run the `elasticsearch-migrate` tool when {xpack} is installed. For example: - -[source, sh] ----------------------------------------------------------------------- -$ bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic --p x-pack-test-password -n lee,foo -r role1,role2,role3,role4,foo -starting migration of users and roles... -importing users from [/home/es/config/shield/users]... -found existing users: [test_user, joe3, joe2] -migrating user [lee] -{"user":{"created":true}} -no user [foo] found, skipping -importing roles from [/home/es/config/shield/roles.yml]... -found existing roles: [marvel_user, role_query_fields, admin_role, role3, admin, -remote_marvel_agent, power_user, role_new_format_name_array, role_run_as, -logstash, role_fields, role_run_as1, role_new_format, kibana4_server, user, -transport_client, role1.ab, role_query] -migrating role [role1] -{"role":{"created":true}} -migrating role [role2] -{"role":{"created":true}} -role [role3] already exists, skipping -no role [foo] found, skipping -users and roles imported. ----------------------------------------------------------------------- - -Additionally, the `-E` flag can be used to specify additional settings. 
For example -to specify a different configuration directory, the command would look like: - -[source, sh] ----------------------------------------------------------------------- -$ bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic --p x-pack-test-password -E path.conf=/etc/elasticsearch ----------------------------------------------------------------------- diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index fcc0a5b22168a..a7cacef8ff017 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -25,3 +25,11 @@ The `xpack.security.authz.store.roles.index.cache.max_size` and been removed. These settings have been redundant and deprecated since the 5.2 release of {es}. +[float] +[[migrate-tool-removed]] +==== The `elasticsearch-migrate` tool has been removed + +The `elasticsearch-migrate` tool provided a way to convert file +realm users and roles into the native realm. It has been deprecated +since 7.2.0. Users and roles should now be created in the native +realm directly. diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java index e557b47fb8912..2eb3a288fbcc2 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -222,7 +222,6 @@ private static void verifyDefaultInstallation(Installation es, Distribution dist "elasticsearch-certgen", "elasticsearch-certutil", "elasticsearch-croneval", - "elasticsearch-migrate", "elasticsearch-saml-metadata", "elasticsearch-setup-passwords", "elasticsearch-sql-cli", diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index 70ac89dc3b7f5..4d528b96c32e9 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -244,7 +244,6 @@ private static void verifyDefaultInstallation(Installation es) { "elasticsearch-certgen", "elasticsearch-certutil", "elasticsearch-croneval", - "elasticsearch-migrate", "elasticsearch-saml-metadata", "elasticsearch-setup-passwords", "elasticsearch-sql-cli", diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate deleted file mode 100755 index 183722d9c9364..0000000000000 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License; -# you may not use this file except in compliance with the Elastic License. - -ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ - ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ - "`dirname "$0"`"/elasticsearch-cli \ - "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat deleted file mode 100644 index a50bc1a384ed0..0000000000000 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ /dev/null @@ -1,19 +0,0 @@ -@echo off - -rem Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one -rem or more contributor license agreements. Licensed under the Elastic License; -rem you may not use this file except in compliance with the Elastic License. - -setlocal enabledelayedexpansion -setlocal enableextensions - -set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool -set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env -call "%~dp0elasticsearch-cli.bat" ^ - %%* ^ - || goto exit - -endlocal -endlocal -:exit -exit /b %ERRORLEVEL% diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java deleted file mode 100644 index 0fbe54d7c1066..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.authc.esnative; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.Appender; -import org.apache.logging.log4j.core.LogEvent; -import org.apache.logging.log4j.core.LoggerContext; -import org.apache.logging.log4j.core.appender.AbstractAppender; -import org.apache.logging.log4j.core.config.Configuration; -import org.apache.logging.log4j.core.config.LoggerConfig; -import org.apache.logging.log4j.core.layout.PatternLayout; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; -import org.elasticsearch.cli.LoggingAwareMultiCommand; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.Terminal.Verbosity; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.core.common.socket.SocketAccess; -import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.ssl.SSLConfiguration; -import org.elasticsearch.xpack.security.authz.store.FileRolesStore; -import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.authc.file.FileUserPasswdStore; -import org.elasticsearch.xpack.security.authc.file.FileUserRolesStore; - -import javax.net.ssl.HttpsURLConnection; - -import java.io.BufferedReader; -import 
java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.HttpURLConnection; -import java.net.URI; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -/** - * This is the command-line tool used for migrating users and roles from the file-based realm into the new native realm using the API for - * import. It reads from the files and tries its best to add the users, showing an error if it was incapable of importing them. Any existing - * users or roles are skipped. - */ -public class ESNativeRealmMigrateTool extends LoggingAwareMultiCommand { - - public static void main(String[] args) throws Exception { - exit(new ESNativeRealmMigrateTool().main(args, Terminal.DEFAULT)); - } - - public ESNativeRealmMigrateTool() { - super("Imports file-based users and roles to the native security realm"); - subcommands.put("native", newMigrateUserOrRoles()); - } - - protected MigrateUserOrRoles newMigrateUserOrRoles() { - return new MigrateUserOrRoles(); - } - - /** - * Command to migrate users and roles to the native realm - */ - public static class MigrateUserOrRoles extends EnvironmentAwareCommand { - - private final OptionSpec username; - private final OptionSpec password; - private final OptionSpec url; - private final OptionSpec usersToMigrateCsv; - private final OptionSpec rolesToMigrateCsv; - - public MigrateUserOrRoles() { - super("Migrates users or roles from file to native realm"); - this.username = parser.acceptsAll(Arrays.asList("u", "username"), - "User used to authenticate with Elasticsearch") - .withRequiredArg().required(); - this.password = parser.acceptsAll(Arrays.asList("p", "password"), - "Password used to authenticate with Elasticsearch") - .withRequiredArg().required(); - this.url = parser.acceptsAll(Arrays.asList("U", "url"), - "URL of Elasticsearch host") - .withRequiredArg(); - this.usersToMigrateCsv = parser.acceptsAll(Arrays.asList("n", "users"), - "Users to migrate from file to native realm") - .withRequiredArg(); - this.rolesToMigrateCsv = parser.acceptsAll(Arrays.asList("r", "roles"), - "Roles to migrate from file to native realm") - .withRequiredArg(); - } - - // Visible for testing - public OptionParser getParser() { - return this.parser; - } - - @Override - protected void printAdditionalHelp(Terminal terminal) { - terminal.println("This tool migrates file based users[1] and roles[2] to the native realm in"); - terminal.println("elasticsearch, saving the administrator from needing to manually transition"); - terminal.println("them from the file."); - } - - // Visible for testing - @Override - public void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - terminal.println("Warning: The migrate tool is deprecated. 
Use the native realm directly instead of file realms."); - terminal.println("starting migration of users and roles..."); - importUsers(terminal, env, options); - importRoles(terminal, env, options); - terminal.println("users and roles imported."); - } - - @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") - private String postURL(Settings settings, Environment env, String method, String urlString, - OptionSet options, @Nullable String bodyString) throws Exception { - URI uri = new URI(urlString); - URL url = uri.toURL(); - HttpURLConnection conn; - // If using SSL, need a custom service because it's likely a self-signed certificate - if ("https".equalsIgnoreCase(uri.getScheme())) { - final SSLService sslService = new SSLService(settings, env); - final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration("xpack.security.http.ssl"); - final HttpsURLConnection httpsConn = (HttpsURLConnection) url.openConnection(); - AccessController.doPrivileged((PrivilegedAction) () -> { - // Requires permission java.lang.RuntimePermission "setFactory"; - httpsConn.setSSLSocketFactory(sslService.sslSocketFactory(sslConfiguration)); - return null; - }); - conn = httpsConn; - } else { - conn = (HttpURLConnection) url.openConnection(); - } - conn.setRequestMethod(method); - conn.setReadTimeout(30 * 1000); // 30 second timeout - // Add basic-auth header - conn.setRequestProperty("Authorization", - UsernamePasswordToken.basicAuthHeaderValue(username.value(options), - new SecureString(password.value(options).toCharArray()))); - conn.setRequestProperty("Content-Type", XContentType.JSON.mediaType()); - conn.setDoOutput(true); // we'll be sending a body - SocketAccess.doPrivileged(conn::connect); - if (bodyString != null) { - try (OutputStream out = conn.getOutputStream()) { - out.write(bodyString.getBytes(StandardCharsets.UTF_8)); - } catch (Exception e) { - try { - conn.disconnect(); - } catch (Exception e2) { - // Ignore exceptions if we weren't able to close the connection after an error - } - throw e; - } - } - try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { - StringBuilder sb = new StringBuilder(); - String line = null; - while ((line = reader.readLine()) != null) { - sb.append(line); - } - return sb.toString(); - } catch (IOException e) { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getErrorStream(), StandardCharsets.UTF_8))) { - StringBuilder sb = new StringBuilder(); - String line = null; - while ((line = reader.readLine()) != null) { - sb.append(line); - } - throw new IOException(sb.toString(), e); - } - } finally { - conn.disconnect(); - } - } - - Set getUsersThatExist(Terminal terminal, Settings settings, Environment env, OptionSet options) throws Exception { - Set existingUsers = new HashSet<>(); - String allUsersJson = postURL(settings, env, "GET", this.url.value(options) + "/_security/user/", options, null); - // EMPTY is safe here because we never use namedObject - try (XContentParser parser = JsonXContent.jsonXContent - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, allUsersJson)) { - XContentParser.Token token = parser.nextToken(); - String userName; - if (token == XContentParser.Token.START_OBJECT) { - while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { - userName = parser.currentName(); - existingUsers.add(userName); - parser.nextToken(); - parser.skipChildren(); - } - } else { - throw 
new ElasticsearchException("failed to retrieve users, expecting an object but got: " + token); - } - } - terminal.println("found existing users: " + existingUsers); - return existingUsers; - } - - static String createUserJson(String[] roles, char[] password) throws IOException { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - { - builder.field("password_hash", new String(password)); - builder.startArray("roles"); - for (String role : roles) { - builder.value(role); - } - builder.endArray(); - } - builder.endObject(); - return Strings.toString(builder); - } - - void importUsers(Terminal terminal, Environment env, OptionSet options) throws FileNotFoundException { - String usersCsv = usersToMigrateCsv.value(options); - String[] usersToMigrate = (usersCsv != null) ? usersCsv.split(",") : Strings.EMPTY_ARRAY; - Path usersFile = FileUserPasswdStore.resolveFile(env); - Path usersRolesFile = FileUserRolesStore.resolveFile(env); - if (Files.exists(usersFile) == false) { - throw new FileNotFoundException("users file [" + usersFile + "] does not exist"); - } else if (Files.exists(usersRolesFile) == false) { - throw new FileNotFoundException("users_roles file [" + usersRolesFile + "] does not exist"); - } - - terminal.println("importing users from [" + usersFile + "]..."); - final Logger logger = getTerminalLogger(terminal); - Map userToHashedPW = FileUserPasswdStore.parseFile(usersFile, logger, env.settings()); - Map userToRoles = FileUserRolesStore.parseFile(usersRolesFile, logger); - Set existingUsers; - try { - existingUsers = getUsersThatExist(terminal, env.settings(), env, options); - } catch (Exception e) { - throw new ElasticsearchException("failed to get users that already exist, skipping user import", e); - } - if (usersToMigrate.length == 0) { - usersToMigrate = userToHashedPW.keySet().toArray(new String[userToHashedPW.size()]); - } - for (String user : usersToMigrate) { - if (userToHashedPW.containsKey(user) == false) { - terminal.println("user [" + user + "] was not found in files, skipping"); - continue; - } else if (existingUsers.contains(user)) { - terminal.println("user [" + user + "] already exists, skipping"); - continue; - } - terminal.println("migrating user [" + user + "]"); - String reqBody = "n/a"; - try { - reqBody = createUserJson(userToRoles.get(user), userToHashedPW.get(user)); - String resp = postURL(env.settings(), env, "POST", - this.url.value(options) + "/_security/user/" + user, options, reqBody); - terminal.println(resp); - } catch (Exception e) { - throw new ElasticsearchException("failed to migrate user [" + user + "] with body: " + reqBody, e); - } - } - } - - Set getRolesThatExist(Terminal terminal, Settings settings, Environment env, OptionSet options) throws Exception { - Set existingRoles = new HashSet<>(); - String allRolesJson = postURL(settings, env, "GET", this.url.value(options) + "/_security/role/", options, null); - // EMPTY is safe here because we never use namedObject - try (XContentParser parser = JsonXContent.jsonXContent - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, allRolesJson)) { - XContentParser.Token token = parser.nextToken(); - String roleName; - if (token == XContentParser.Token.START_OBJECT) { - while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { - roleName = parser.currentName(); - existingRoles.add(roleName); - parser.nextToken(); - parser.skipChildren(); - } - } else { - throw new ElasticsearchException("failed to retrieve roles, expecting an object but got: " 
+ token); - } - } - terminal.println("found existing roles: " + existingRoles); - return existingRoles; - } - - static String createRoleJson(RoleDescriptor rd) throws IOException { - XContentBuilder builder = jsonBuilder(); - rd.toXContent(builder, ToXContent.EMPTY_PARAMS, true); - return Strings.toString(builder); - } - - void importRoles(Terminal terminal, Environment env, OptionSet options) throws FileNotFoundException { - String rolesCsv = rolesToMigrateCsv.value(options); - String[] rolesToMigrate = (rolesCsv != null) ? rolesCsv.split(",") : Strings.EMPTY_ARRAY; - Path rolesFile = FileRolesStore.resolveFile(env).toAbsolutePath(); - if (Files.exists(rolesFile) == false) { - throw new FileNotFoundException("roles.yml file [" + rolesFile + "] does not exist"); - } - terminal.println("importing roles from [" + rolesFile + "]..."); - Logger logger = getTerminalLogger(terminal); - Map roles = FileRolesStore.parseRoleDescriptors(rolesFile, logger, true, Settings.EMPTY); - Set existingRoles; - try { - existingRoles = getRolesThatExist(terminal, env.settings(), env, options); - } catch (Exception e) { - throw new ElasticsearchException("failed to get roles that already exist, skipping role import", e); - } - if (rolesToMigrate.length == 0) { - rolesToMigrate = roles.keySet().toArray(new String[roles.size()]); - } - for (String roleName : rolesToMigrate) { - if (roles.containsKey(roleName) == false) { - terminal.println("no role [" + roleName + "] found, skipping"); - continue; - } else if (existingRoles.contains(roleName)) { - terminal.println("role [" + roleName + "] already exists, skipping"); - continue; - } - terminal.println("migrating role [" + roleName + "]"); - String reqBody = "n/a"; - try { - reqBody = createRoleJson(roles.get(roleName)); - String resp = postURL(env.settings(), env, "POST", - this.url.value(options) + "/_security/role/" + roleName, options, reqBody); - terminal.println(resp); - } catch (Exception e) { - throw new ElasticsearchException("failed to migrate role [" + roleName + "] with body: " + reqBody, e); - } - } - } - } - - /** - * Creates a new Logger that is detached from the ROOT logger and only has an appender that will output log messages to the terminal - */ - static Logger getTerminalLogger(final Terminal terminal) { - final Logger logger = LogManager.getLogger(ESNativeRealmMigrateTool.class); - Loggers.setLevel(logger, Level.ALL); - - final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); - final Configuration config = ctx.getConfiguration(); - - // create appender - final Appender appender = new AbstractAppender(ESNativeRealmMigrateTool.class.getName(), null, - PatternLayout.newBuilder() - // Specify the configuration so log4j doesn't re-initialize - .withConfiguration(config) - .withPattern("%m") - .build()) { - @Override - public void append(LogEvent event) { - switch (event.getLevel().getStandardLevel()) { - case FATAL: - case ERROR: - terminal.println(Verbosity.NORMAL, event.getMessage().getFormattedMessage()); - break; - case OFF: - break; - default: - terminal.println(Verbosity.VERBOSE, event.getMessage().getFormattedMessage()); - break; - } - } - }; - appender.start(); - - // get the config, detach from parent, remove appenders, add custom appender - final LoggerConfig loggerConfig = config.getLoggerConfig(ESNativeRealmMigrateTool.class.getName()); - loggerConfig.setParent(null); - loggerConfig.getAppenders().forEach((s, a) -> Loggers.removeAppender(logger, a)); - Loggers.addAppender(logger, appender); - return logger; - } -} diff 
--git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java deleted file mode 100644 index a73fc93f32e45..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.authc.esnative; - -import joptsimple.OptionException; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.NativeRealmIntegTestCase; -import org.elasticsearch.common.CharArrays; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; -import org.junit.BeforeClass; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Path; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; -import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForPEMFiles; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; - -/** - * Integration tests for the {@code ESNativeMigrateTool} - */ -public class ESNativeMigrateToolTests extends NativeRealmIntegTestCase { - - // Randomly use SSL (or not) - private static boolean useSSL; - - @BeforeClass - public static void setSSL() { - useSSL = randomBoolean(); - } - - @Override - protected boolean addMockHttpTransport() { - return false; // enable http - } - - @Override - public Settings nodeSettings(int nodeOrdinal) { - logger.info("--> use SSL? 
{}", useSSL); - Settings.Builder builder = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)); - addSSLSettingsForNodePEMFiles(builder, "xpack.security.http.", true); - builder.put("xpack.security.http.ssl.enabled", useSSL); - return builder.build(); - } - - @Override - protected boolean transportSSLEnabled() { - return useSSL; - } - - @Override - protected boolean shouldSetReservedUserPasswords() { - return false; - } - - private Environment nodeEnvironment() throws Exception { - return internalCluster().getInstances(Environment.class).iterator().next(); - } - - public void testRetrieveUsers() throws Exception { - final Environment nodeEnvironment = nodeEnvironment(); - String home = Environment.PATH_HOME_SETTING.get(nodeEnvironment.settings()); - Path conf = nodeEnvironment.configFile(); - SecurityClient c = new SecurityClient(client()); - logger.error("--> creating users"); - int numToAdd = randomIntBetween(1,10); - Set addedUsers = new HashSet<>(numToAdd); - for (int i = 0; i < numToAdd; i++) { - String uname = randomAlphaOfLength(5); - c.preparePutUser(uname, "s3kirt".toCharArray(), getFastStoredHashAlgoForTests(), "role1", "user").get(); - addedUsers.add(uname); - } - logger.error("--> waiting for .security index"); - ensureGreen(RestrictedIndicesNames.SECURITY_MAIN_ALIAS); - - MockTerminal t = new MockTerminal(); - String username = nodeClientUsername(); - String password = new String(CharArrays.toUtf8Bytes(nodeClientPassword().getChars()), StandardCharsets.UTF_8); - String url = getHttpURL(); - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - - Settings.Builder builder = Settings.builder() - .put("path.home", home) - .put("path.conf", conf.toString()) - .put("xpack.security.http.ssl.client_authentication", "none"); - addSSLSettingsForPEMFiles( - builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", - "testnode", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "xpack.security.http.", - Collections.singletonList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - Settings settings = builder.build(); - logger.error("--> retrieving users using URL: {}, home: {}", url, home); - - OptionParser parser = muor.getParser(); - OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); - logger.info("--> options: {}", options.asMap()); - Set users = muor.getUsersThatExist(t, settings, new Environment(settings, conf), options); - logger.info("--> output: \n{}", t.getOutput()); - for (String u : addedUsers) { - assertThat("expected list to contain: " + u + ", real list: " + users, users.contains(u), is(true)); - } - } - - public void testRetrieveRoles() throws Exception { - final Environment nodeEnvironment = nodeEnvironment(); - String home = Environment.PATH_HOME_SETTING.get(nodeEnvironment.settings()); - Path conf = nodeEnvironment.configFile(); - SecurityClient c = new SecurityClient(client()); - logger.error("--> creating roles"); - int numToAdd = randomIntBetween(1,10); - Set addedRoles = new HashSet<>(numToAdd); - for (int i = 0; i < numToAdd; i++) { - String rname = randomAlphaOfLength(5); - c.preparePutRole(rname) - .cluster("all", "none") - .runAs("root", "nobody") - .addIndices(new String[] { "index" }, new String[] { "read" }, new String[] { "body", "title" }, null, - new BytesArray("{\"query\": {\"match_all\": {}}}"), randomBoolean()) - .get(); - addedRoles.add(rname); - } - logger.error("--> waiting for 
.security index"); - ensureGreen(RestrictedIndicesNames.SECURITY_MAIN_ALIAS); - - MockTerminal t = new MockTerminal(); - String username = nodeClientUsername(); - String password = new String(CharArrays.toUtf8Bytes(nodeClientPassword().getChars()), StandardCharsets.UTF_8); - String url = getHttpURL(); - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - Settings.Builder builder = Settings.builder() - .put("path.home", home) - .put("xpack.security.http.ssl.client_authentication", "none"); - addSSLSettingsForPEMFiles(builder, - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem", - "testclient", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", - "xpack.security.http.", - Collections.singletonList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); - Settings settings = builder.build(); - logger.error("--> retrieving roles using URL: {}, home: {}", url, home); - - OptionParser parser = muor.getParser(); - OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); - Set roles = muor.getRolesThatExist(t, settings, new Environment(settings, conf), options); - logger.info("--> output: \n{}", t.getOutput()); - for (String r : addedRoles) { - assertThat("expected list to contain: " + r, roles.contains(r), is(true)); - } - } - - public void testMissingPasswordParameter() { - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - - final OptionException ex = expectThrows(OptionException.class, - () -> muor.getParser().parse("-u", "elastic", "-U", "http://localhost:9200")); - - assertThat(ex.getMessage(), containsString("password")); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java deleted file mode 100644 index 212fd4a8dab42..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.authc.esnative; - -import joptsimple.OptionSet; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cli.Command; -import org.elasticsearch.cli.CommandTestCase; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.cli.Terminal.Verbosity; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; - -import java.io.FileNotFoundException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.isEmptyString; - -/** - * Unit tests for the {@code ESNativeRealmMigrateTool} - */ -public class ESNativeRealmMigrateToolTests extends CommandTestCase { - - @Override - protected Command newCommand() { - return new ESNativeRealmMigrateTool() { - @Override - protected MigrateUserOrRoles newMigrateUserOrRoles() { - return new MigrateUserOrRoles() { - - @Override - protected Environment createEnv(Map settings) throws UserException { - Settings.Builder builder = Settings.builder(); - settings.forEach((k, v) -> builder.put(k, v)); - return TestEnvironment.newEnvironment(builder.build()); - } - - }; - } - }; - } - - public void testUserJson() throws Exception { - assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createUserJson(Strings.EMPTY_ARRAY, "hash".toCharArray()), - equalTo("{\"password_hash\":\"hash\",\"roles\":[]}")); - assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createUserJson(new String[]{"role1", "role2"}, "hash".toCharArray()), - equalTo("{\"password_hash\":\"hash\",\"roles\":[\"role1\",\"role2\"]}")); - } - - public void testRoleJson() throws Exception { - RoleDescriptor.IndicesPrivileges ip = RoleDescriptor.IndicesPrivileges.builder() - .indices(new String[]{"i1", "i2", "i3"}) - .privileges(new String[]{"all"}) - .grantedFields("body") - .build(); - RoleDescriptor.IndicesPrivileges[] ips = new RoleDescriptor.IndicesPrivileges[1]; - ips[0] = ip; - String[] cluster = Strings.EMPTY_ARRAY; - String[] runAs = Strings.EMPTY_ARRAY; - RoleDescriptor rd = new RoleDescriptor("rolename", cluster, ips, runAs); - assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createRoleJson(rd), - equalTo("{\"cluster\":[]," + - "\"indices\":[{\"names\":[\"i1\",\"i2\",\"i3\"]," + - "\"privileges\":[\"all\"],\"field_security\":{\"grant\":[\"body\"]}," + - "\"allow_restricted_indices\":false}]," + - "\"applications\":[]," + - "\"run_as\":[],\"metadata\":{},\"type\":\"role\"}")); - } - - public void testTerminalLogger() throws Exception { - Logger terminalLogger = ESNativeRealmMigrateTool.getTerminalLogger(terminal); - assertThat(terminal.getOutput(), isEmptyString()); - - // only error and fatal gets logged at normal verbosity - terminal.setVerbosity(Verbosity.NORMAL); - List nonLoggingLevels = new ArrayList<>(Arrays.asList(Level.values())); - nonLoggingLevels.removeAll(Arrays.asList(Level.ERROR, Level.FATAL)); - for (Level level : nonLoggingLevels) { - terminalLogger.log(level, "this level should not log " + level.name()); - assertThat(terminal.getOutput(), 
isEmptyString()); - } - - terminalLogger.log(Level.ERROR, "logging an error"); - assertEquals("logging an error\n", terminal.getOutput()); - terminal.reset(); - assertThat(terminal.getOutput(), isEmptyString()); - - terminalLogger.log(Level.FATAL, "logging a fatal message"); - assertEquals("logging a fatal message\n", terminal.getOutput()); - terminal.reset(); - assertThat(terminal.getOutput(), isEmptyString()); - - // everything will get logged at verbose! - terminal.setVerbosity(Verbosity.VERBOSE); - List loggingLevels = new ArrayList<>(Arrays.asList(Level.values())); - loggingLevels.remove(Level.OFF); - for (Level level : loggingLevels) { - terminalLogger.log(level, "this level should log " + level.name()); - assertEquals("this level should log " + level.name() + "\n", terminal.getOutput()); - terminal.reset(); - assertThat(terminal.getOutput(), isEmptyString()); - } - } - - public void testMissingFiles() throws Exception { - Path homeDir = createTempDir(); - Path confDir = homeDir.resolve("config"); - Path xpackConfDir = confDir; - Files.createDirectories(xpackConfDir); - - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - - OptionSet options = muor.getParser().parse("-u", "elastic", "-p", SecuritySettingsSourceField.TEST_PASSWORD, - "-U", "http://localhost:9200"); - Settings settings = Settings.builder().put("path.home", homeDir).build(); - Environment environment = new Environment(settings, confDir); - - MockTerminal mockTerminal = new MockTerminal(); - - FileNotFoundException fnfe = expectThrows(FileNotFoundException.class, - () -> muor.importUsers(mockTerminal, environment, options)); - assertThat(fnfe.getMessage(), containsString("users file")); - - Files.createFile(xpackConfDir.resolve("users")); - fnfe = expectThrows(FileNotFoundException.class, - () -> muor.importUsers(mockTerminal, environment, options)); - assertThat(fnfe.getMessage(), containsString("users_roles file")); - - fnfe = expectThrows(FileNotFoundException.class, - () -> muor.importRoles(mockTerminal, environment, options)); - assertThat(fnfe.getMessage(), containsString("roles.yml file")); - } -} diff --git a/x-pack/qa/security-migrate-tests/build.gradle b/x-pack/qa/security-migrate-tests/build.gradle deleted file mode 100644 index 1851f0e21b027..0000000000000 --- a/x-pack/qa/security-migrate-tests/build.gradle +++ /dev/null @@ -1,43 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackModule('security'), configuration: 'runtime') - testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') -} - -integTestCluster { - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - extraConfigFile 'roles.yml', 'roles.yml' - [ - test_admin: 'superuser', - transport_user: 'superuser', - existing: 'superuser', - bob: 'actual_role' - ].each { String user, String role -> - setupCommand 'setupUser#' + user, - 'bin/elasticsearch-users', 'useradd', user, '-p', 'x-pack-test-password', '-r', role - } - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_admin', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } - 
// TODO: systemProperty('tests.cluster', "${-> cluster.transportPortURI }") when migerating to testclusters -} - -testingConventions { - naming.clear() - naming { - IT { - baseClass 'org.elasticsearch.xpack.security.MigrateToolTestCase' - } - } -} diff --git a/x-pack/qa/security-migrate-tests/roles.yml b/x-pack/qa/security-migrate-tests/roles.yml deleted file mode 100644 index 6e997383f8a5a..0000000000000 --- a/x-pack/qa/security-migrate-tests/roles.yml +++ /dev/null @@ -1,22 +0,0 @@ -# A role that has all sorts of configuration: -# - it can monitor the cluster -# - for index1 and index2 it can do CRUD things and refresh -# - for other indices it has search-only privileges -actual_role: - run_as: [ "joe" ] - cluster: - - monitor - indices: - - names: [ "index1", "index2" ] - privileges: [ "read", "write", "create_index", "indices:admin/refresh" ] - field_security: - grant: - - foo - - bar - query: - bool: - must_not: - match: - hidden: true - - names: "*" - privileges: [ "read" ] diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java deleted file mode 100644 index 3581bf2fda7fd..0000000000000 --- a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; - -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; -import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; -import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; -import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool; -import org.junit.Before; - -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collections; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.containsString; - -/** - * Integration tests for the {@code elasticsearch-migrate} shell command - */ -public class MigrateToolIT extends MigrateToolTestCase { - - @Before - public void setupUpTest() throws Exception { - Client client = getClient(); - SecurityClient c = new SecurityClient(client); - - // Add an existing user so the tool will skip it - 
PutUserResponse pur = c.preparePutUser("existing", "s3kirt".toCharArray(), Hasher.BCRYPT, "role1", "user").get(); - assertTrue(pur.created()); - } - - public void testRunMigrateTool() throws Exception { - final String testConfigDir = System.getProperty("tests.config.dir"); - logger.info("--> CONF: {}", testConfigDir); - final Path configPath = PathUtils.get(testConfigDir); - Settings settings = Settings.builder().put("path.home", configPath.getParent()).build(); - // Cluster should already be up - String url = "http://" + getHttpURL(); - logger.info("--> using URL: {}", url); - MockTerminal t = new MockTerminal(); - ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); - OptionParser parser = muor.getParser(); - - OptionSet options = parser.parse("-u", "test_admin", "-p", "x-pack-test-password", "-U", url); - muor.execute(t, options, new Environment(settings, configPath)); - - logger.info("--> output:\n{}", t.getOutput()); - - Client client = getClient(); - SecurityClient c = new SecurityClient(client); - - // Check that the migrated user can be retrieved - GetUsersResponse resp = c.prepareGetUsers("bob").get(); - assertTrue("user 'bob' should exist", resp.hasUsers()); - User bob = resp.users()[0]; - assertEquals(bob.principal(), "bob"); - assertArrayEquals(bob.roles(), new String[]{"actual_role"}); - - // Make sure the existing user did not change - resp = c.prepareGetUsers("existing").get(); - assertTrue("user should exist", resp.hasUsers()); - User existing = resp.users()[0]; - assertEquals(existing.principal(), "existing"); - assertArrayEquals(existing.roles(), new String[]{"role1", "user"}); - - // Make sure the "actual_role" made it in and is correct - GetRolesResponse roleResp = c.prepareGetRoles().names("actual_role").get(); - assertTrue("role should exist", roleResp.hasRoles()); - RoleDescriptor rd = roleResp.roles()[0]; - assertNotNull(rd); - assertEquals(rd.getName(), "actual_role"); - assertArrayEquals(rd.getClusterPrivileges(), new String[]{"monitor"}); - assertArrayEquals(rd.getRunAs(), new String[]{"joe"}); - RoleDescriptor.IndicesPrivileges[] ips = rd.getIndicesPrivileges(); - assertEquals(ips.length, 2); - for (RoleDescriptor.IndicesPrivileges ip : ips) { - final FieldPermissions fieldPermissions = new FieldPermissions( - new FieldPermissionsDefinition(ip.getGrantedFields(), ip.getDeniedFields())); - if (Arrays.equals(ip.getIndices(), new String[]{"index1", "index2"})) { - assertArrayEquals(ip.getPrivileges(), new String[]{"read", "write", "create_index", "indices:admin/refresh"}); - assertTrue(fieldPermissions.hasFieldLevelSecurity()); - assertTrue(fieldPermissions.grantsAccessTo("bar")); - assertTrue(fieldPermissions.grantsAccessTo("foo")); - assertNotNull(ip.getQuery()); - assertThat(ip.getQuery().iterator().next().utf8ToString(), - containsString("{\"bool\":{\"must_not\":{\"match\":{\"hidden\":true}}}}")); - } else { - assertArrayEquals(ip.getIndices(), new String[]{"*"}); - assertArrayEquals(ip.getPrivileges(), new String[]{"read"}); - assertFalse(fieldPermissions.hasFieldLevelSecurity()); - assertNull(ip.getQuery()); - } - } - - // Check that bob can access the things the "actual_role" says he can - String token = basicAuthHeaderValue("bob", new SecureString("x-pack-test-password".toCharArray())); - // Create "index1" index and try to search from it as "bob" - client.filterWithHeader(Collections.singletonMap("Authorization", token)).admin().indices().prepareCreate("index1").get(); - // Wait for the index to be ready so 
it doesn't fail if no shards are initialized - client.admin().cluster().health(Requests.clusterHealthRequest("index1") - .timeout(TimeValue.timeValueSeconds(30)) - .waitForYellowStatus() - .waitForEvents(Priority.LANGUID) - .waitForNoRelocatingShards(true)) - .actionGet(); - client.filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("index1").get(); - } -} diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java deleted file mode 100644 index 0111aeff4cca2..0000000000000 --- a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.hamcrest.Matchers.notNullValue; - -/** - * {@link MigrateToolTestCase} is an abstract base class to run integration - * tests against an external Elasticsearch Cluster. - *
<p>
- * You can define a list of transport addresses from where you can reach your cluster - * by setting "tests.cluster" system property. It defaults to "localhost:9300". - *
<p>
- * All tests can be run from maven using mvn install as maven will start an external cluster first. - *
<p>
- * If you want to debug this module from your IDE, then start an external cluster by yourself - * then run JUnit. If you changed the default port, set "tests.cluster=localhost:PORT" when running - * your test. - */ -@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -public abstract class MigrateToolTestCase extends LuceneTestCase { - - /** - * Key used to eventually switch to using an external cluster and provide its transport addresses - */ - public static final String TESTS_CLUSTER = "tests.cluster"; - - /** - * Key used to eventually switch to using an external cluster and provide its transport addresses - */ - public static final String TESTS_HTTP_CLUSTER = "tests.rest.cluster"; - - /** - * Defaults to localhost:9300 - */ - public static final String TESTS_CLUSTER_DEFAULT = "localhost:9300"; - - protected static final Logger logger = LogManager.getLogger(MigrateToolTestCase.class); - - private static final AtomicInteger counter = new AtomicInteger(); - private static Client client; - private static String clusterAddresses; - private static String clusterHttpAddresses; - - private static Client startClient(Path tempDir, TransportAddress... transportAddresses) { - logger.info("--> Starting Elasticsearch Java TransportClient {}, {}", transportAddresses, tempDir); - - Settings clientSettings = Settings.builder() - .put("cluster.name", "qa_migrate_tests_" + counter.getAndIncrement()) - .put("client.transport.ignore_cluster_name", true) - .put("path.home", tempDir) - .put(SecurityField.USER_SETTING.getKey(), "transport_user:x-pack-test-password") - .build(); - - TransportClient client = new PreBuiltXPackTransportClient(clientSettings).addTransportAddresses(transportAddresses); - Exception clientException = null; - try { - logger.info("--> Elasticsearch Java TransportClient started"); - ClusterHealthResponse health = client.admin().cluster().prepareHealth().get(); - logger.info("--> connected to [{}] cluster which is running [{}] node(s).", - health.getClusterName(), health.getNumberOfNodes()); - } catch (Exception e) { - clientException = e; - } - - assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, clientException); - - return client; - } - - private static Client startClient() throws UnknownHostException { - String[] stringAddresses = clusterAddresses.split(","); - TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; - int i = 0; - for (String stringAddress : stringAddresses) { - int lastColon = stringAddress.lastIndexOf(":"); - if (lastColon == -1) { - throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); - } - String ip = stringAddress.substring(0, lastColon); - String port = stringAddress.substring(lastColon + 1); - try { - transportAddresses[i++] = new TransportAddress(InetAddress.getByName(ip), Integer.valueOf(port)); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("port is not valid, expected number but was [" + port + "]"); - } - } - return startClient(createTempDir(), transportAddresses); - } - - public static Client getClient() { - if (client == null) { - try { - client = startClient(); - } catch (UnknownHostException e) { - logger.error("could not start the client", e); - } - assertThat(client, notNullValue()); - } - return client; - } - - public static String getHttpURL() { - return clusterHttpAddresses; - } - - @BeforeClass - public static void initializeSettings() throws UnknownHostException { - clusterAddresses = 
System.getProperty(TESTS_CLUSTER); - clusterHttpAddresses = System.getProperty(TESTS_HTTP_CLUSTER); - if (clusterAddresses == null || clusterAddresses.isEmpty()) { - throw new UnknownHostException("unable to get a cluster address"); - } - } - - @AfterClass - public static void stopTransportClient() { - if (client != null) { - client.close(); - client = null; - } - } - - @Before - public void defineIndexName() { - doClean(); - } - - @After - public void cleanIndex() { - doClean(); - } - - private void doClean() { - if (client != null) { - try { - client.admin().indices().prepareDelete("_all").get(); - } catch (Exception e) { - // We ignore this cleanup exception - } - } - } -} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash index c267744194a1c..bafe7d9342f0e 100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash +++ b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash @@ -17,7 +17,6 @@ verify_xpack_installation() { 'elasticsearch-certgen' 'elasticsearch-certutil' 'elasticsearch-croneval' - 'elasticsearch-migrate' 'elasticsearch-saml-metadata' 'elasticsearch-setup-passwords' 'elasticsearch-sql-cli' From e63b1fbb6ef7ab251ed31d707df4e4ea53f4ab01 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 17 May 2019 15:59:34 -0400 Subject: [PATCH 115/321] SQL: Suppress geo tests failing on tr-TR locale (#42200) Due to a bug in the JTS WKT parser, JTS cannot parse most WKT shapes if the shape type is written in lower case. For example, `point (1 2)` causes JTS inside H2GIS to fail on the tr-TR locale as a result of case-insensitive comparison. --- .../org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java index 405efac5cac35..ec97cab6f10b1 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -31,6 +31,8 @@ public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { @ClassRule public static LocalH2 H2 = new LocalH2((c) -> { + assumeTrue("JTS inside H2 is using default local for toUpperCase() in string comparison making it fail to parse WKT on certain" + + " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); // Load GIS extensions H2GISFunctions.load(c); c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); From 8c01a8d76635235f1c4233b710715f0cff4f8cbb Mon Sep 17 00:00:00 2001 From: Ed Savage <32410745+edsavage@users.noreply.github.com> Date: Fri, 17 May 2019 16:37:52 -0400 Subject: [PATCH 116/321] [ML] Improve hard_limit audit message (#42086) Improve the hard_limit memory audit message by reporting how many bytes over the configured memory limit the job was at the point of the last allocation failure. Previously the model memory usage was reported; however, this was inaccurate and hence of limited use - primarily because the total memory used by the model can decrease significantly after the model's status is changed to hard_limit but before the model size stats are reported from autodetect to ES.
While this PR contains the changes to the format of the hard_limit audit message it is dependent on modifications to the ml-cpp backend to send additional data fields in the model size stats message. These changes will follow in a subsequent PR. It is worth noting that this PR must be merged prior to the ml-cpp one, to keep CI tests happy. --- .../client/ml/job/process/ModelSizeStats.java | 53 +++++++++++--- .../client/MachineLearningGetResultsIT.java | 32 ++++++++- .../ml/job/process/ModelSizeStatsTests.java | 8 +++ .../xpack/core/ml/job/messages/Messages.java | 7 +- .../autodetect/state/ModelSizeStats.java | 70 +++++++++++++++++-- .../autodetect/state/ModelSizeStatsTests.java | 8 +++ .../output/AutoDetectResultProcessor.java | 8 ++- .../AutoDetectResultProcessorTests.java | 7 +- 8 files changed, 171 insertions(+), 22 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java index c9a34fe5c98d9..6ea3cede0e3f1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSizeStats.java @@ -47,6 +47,8 @@ public class ModelSizeStats implements ToXContentObject { * Field Names */ public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes"); + public static final ParseField MODEL_BYTES_EXCEEDED_FIELD = new ParseField("model_bytes_exceeded"); + public static final ParseField MODEL_BYTES_MEMORY_LIMIT_FIELD = new ParseField("model_bytes_memory_limit"); public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("total_by_field_count"); public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("total_over_field_count"); public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("total_partition_field_count"); @@ -61,6 +63,8 @@ public class ModelSizeStats implements ToXContentObject { static { PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); PARSER.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD); + PARSER.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD); + PARSER.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD); PARSER.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD); PARSER.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD); PARSER.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD); @@ -97,6 +101,8 @@ public String toString() { private final String jobId; private final long modelBytes; + private final Long modelBytesExceeded; + private final Long modelBytesMemoryLimit; private final long totalByFieldCount; private final long totalOverFieldCount; private final long totalPartitionFieldCount; @@ -105,11 +111,13 @@ public String toString() { private final Date timestamp; private final Date logTime; - private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount, - long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus, - Date timestamp, Date logTime) { + private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount, + long totalOverFieldCount, long totalPartitionFieldCount, long bucketAllocationFailuresCount, + 
MemoryStatus memoryStatus, Date timestamp, Date logTime) { this.jobId = jobId; this.modelBytes = modelBytes; + this.modelBytesExceeded = modelBytesExceeded; + this.modelBytesMemoryLimit = modelBytesMemoryLimit; this.totalByFieldCount = totalByFieldCount; this.totalOverFieldCount = totalOverFieldCount; this.totalPartitionFieldCount = totalPartitionFieldCount; @@ -126,6 +134,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Job.ID.getPreferredName(), jobId); builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes); + if (modelBytesExceeded != null) { + builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded); + } + if (modelBytesMemoryLimit != null) { + builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit); + } builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount); builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount); builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount); @@ -148,6 +162,14 @@ public long getModelBytes() { return modelBytes; } + public Long getModelBytesExceeded() { + return modelBytesExceeded; + } + + public Long getModelBytesMemoryLimit() { + return modelBytesMemoryLimit; + } + public long getTotalByFieldCount() { return totalByFieldCount; } @@ -188,8 +210,8 @@ public Date getLogTime() { @Override public int hashCode() { - return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } /** @@ -207,7 +229,8 @@ public boolean equals(Object other) { ModelSizeStats that = (ModelSizeStats) other; - return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount + return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded) + && Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit) && this.totalByFieldCount == that.totalByFieldCount && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp) @@ -219,6 +242,8 @@ public static class Builder { private final String jobId; private long modelBytes; + private Long modelBytesExceeded; + private Long modelBytesMemoryLimit; private long totalByFieldCount; private long totalOverFieldCount; private long totalPartitionFieldCount; @@ -236,6 +261,8 @@ public Builder(String jobId) { public Builder(ModelSizeStats modelSizeStats) { this.jobId = modelSizeStats.jobId; this.modelBytes = modelSizeStats.modelBytes; + this.modelBytesExceeded = modelSizeStats.modelBytesExceeded; + this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit; this.totalByFieldCount = modelSizeStats.totalByFieldCount; this.totalOverFieldCount = modelSizeStats.totalOverFieldCount; this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount; @@ -250,6 +277,16 @@ public Builder setModelBytes(long modelBytes) { 
return this; } + public Builder setModelBytesExceeded(long modelBytesExceeded) { + this.modelBytesExceeded = modelBytesExceeded; + return this; + } + + public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) { + this.modelBytesMemoryLimit = modelBytesMemoryLimit; + return this; + } + public Builder setTotalByFieldCount(long totalByFieldCount) { this.totalByFieldCount = totalByFieldCount; return this; @@ -287,8 +324,8 @@ public Builder setLogTime(Date logTime) { } public ModelSizeStats build() { - return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java index 092bc254f50fa..34ca5cd2aa448 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -150,11 +150,15 @@ private void addCategoriesIndexRequests(BulkRequest bulkRequest) { private void addModelSnapshotIndexRequests(BulkRequest bulkRequest) { { + // Index a number of model snapshots, one of which contains the new model_size_stats fields + // 'model_bytes_exceeded' and 'model_bytes_memory_limit' that were introduced in 7.2.0. + // We want to verify that we can parse the snapshots whether or not these fields are present. 
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX); indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541587919000, " + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", \"snapshot_id\":\"1541587919\"," + "\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," + - "\"model_bytes\":51722, \"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"model_bytes\":51722, \"model_bytes_exceeded\":10762, \"model_bytes_memory_limit\":40960, \"total_by_field_count\":3, " + + "\"total_over_field_count\":0, \"total_partition_field_count\":2," + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000," + " \"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000, \"latest_result_time_stamp\":1519930800000," + " \"retain\":false }", XContentType.JSON); @@ -223,6 +227,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -241,6 +247,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -259,6 +267,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); 
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -288,6 +298,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -306,6 +318,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -324,6 +338,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -353,6 +369,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); 
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -383,6 +401,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -402,6 +422,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -430,6 +452,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -470,6 +494,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); 
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -488,6 +514,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); @@ -517,6 +545,8 @@ public void testGetModelSnapshots() throws IOException { assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L))); assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID)); assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null)); + assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L)); assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java index 4a12a75f2b17d..8c43feb545a26 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/process/ModelSizeStatsTests.java @@ -31,6 +31,8 @@ public class ModelSizeStatsTests extends AbstractXContentTestCase createParser(boolean igno parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); parser.declareString((modelSizeStat, s) -> {}, Result.RESULT_TYPE); parser.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD); + parser.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD); + parser.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD); parser.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD); parser.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD); parser.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD); @@ -100,6 +105,8 @@ public String toString() { private final String jobId; private final long modelBytes; + private final Long modelBytesExceeded; + private final Long modelBytesMemoryLimit; private final long totalByFieldCount; private final long totalOverFieldCount; private final long totalPartitionFieldCount; @@ -108,11 +115,14 @@ public String toString() { private final Date timestamp; private final Date logTime; - private ModelSizeStats(String jobId, long modelBytes, long 
totalByFieldCount, long totalOverFieldCount, - long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus, + private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount, + long totalOverFieldCount, long totalPartitionFieldCount, long bucketAllocationFailuresCount, + MemoryStatus memoryStatus, Date timestamp, Date logTime) { this.jobId = jobId; this.modelBytes = modelBytes; + this.modelBytesExceeded = modelBytesExceeded; + this.modelBytesMemoryLimit = modelBytesMemoryLimit; this.totalByFieldCount = totalByFieldCount; this.totalOverFieldCount = totalOverFieldCount; this.totalPartitionFieldCount = totalPartitionFieldCount; @@ -125,6 +135,16 @@ private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, lo public ModelSizeStats(StreamInput in) throws IOException { jobId = in.readString(); modelBytes = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + modelBytesExceeded = in.readOptionalLong(); + } else { + modelBytesExceeded = null; + } + if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + modelBytesMemoryLimit = in.readOptionalLong(); + } else { + modelBytesMemoryLimit = null; + } totalByFieldCount = in.readVLong(); totalOverFieldCount = in.readVLong(); totalPartitionFieldCount = in.readVLong(); @@ -146,6 +166,12 @@ public static String documentIdPrefix(String jobId) { public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeVLong(modelBytes); + if (out.getVersion().onOrAfter(Version.V_7_2_0)) { + out.writeOptionalLong(modelBytesExceeded); + } + if (out.getVersion().onOrAfter(Version.V_7_2_0)) { + out.writeOptionalLong(modelBytesMemoryLimit); + } out.writeVLong(totalByFieldCount); out.writeVLong(totalOverFieldCount); out.writeVLong(totalPartitionFieldCount); @@ -171,6 +197,12 @@ public XContentBuilder doXContentBody(XContentBuilder builder) throws IOExceptio builder.field(Job.ID.getPreferredName(), jobId); builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes); + if (modelBytesExceeded != null) { + builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded); + } + if (modelBytesMemoryLimit != null) { + builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit); + } builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount); builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount); builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount); @@ -192,6 +224,14 @@ public long getModelBytes() { return modelBytes; } + public Long getModelBytesExceeded() { + return modelBytesExceeded; + } + + public Long getModelBytesMemoryLimit() { + return modelBytesMemoryLimit; + } + public long getTotalByFieldCount() { return totalByFieldCount; } @@ -231,8 +271,8 @@ public Date getLogTime() { @Override public int hashCode() { // this.id excluded here as it is generated by the datastore - return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } /** @@ -250,7 +290,9 @@ public boolean 
equals(Object other) { ModelSizeStats that = (ModelSizeStats) other; - return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount + return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded) + && Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit) + && this.totalByFieldCount == that.totalByFieldCount && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp) @@ -262,6 +304,8 @@ public static class Builder { private final String jobId; private long modelBytes; + private Long modelBytesExceeded; + private Long modelBytesMemoryLimit; private long totalByFieldCount; private long totalOverFieldCount; private long totalPartitionFieldCount; @@ -279,6 +323,8 @@ public Builder(String jobId) { public Builder(ModelSizeStats modelSizeStats) { this.jobId = modelSizeStats.jobId; this.modelBytes = modelSizeStats.modelBytes; + this.modelBytesExceeded = modelSizeStats.modelBytesExceeded; + this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit; this.totalByFieldCount = modelSizeStats.totalByFieldCount; this.totalOverFieldCount = modelSizeStats.totalOverFieldCount; this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount; @@ -293,6 +339,16 @@ public Builder setModelBytes(long modelBytes) { return this; } + public Builder setModelBytesExceeded(long modelBytesExceeded) { + this.modelBytesExceeded = modelBytesExceeded; + return this; + } + + public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) { + this.modelBytesMemoryLimit = modelBytesMemoryLimit; + return this; + } + public Builder setTotalByFieldCount(long totalByFieldCount) { this.totalByFieldCount = totalByFieldCount; return this; @@ -330,8 +386,8 @@ public Builder setLogTime(Date logTime) { } public ModelSizeStats build() { - return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, - bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount, + totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java index e66fea90f049b..90e4bacc3f8b1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java @@ -22,6 +22,8 @@ public class ModelSizeStatsTests extends AbstractSerializingTestCase Date: Sat, 18 May 2019 09:36:33 -0400 Subject: [PATCH 117/321] Remove Legacy UUID Generator (#42204) * This isn't used anymore in 8.0 --- .../common/LegacyTimeBasedUUIDGenerator.java | 87 ------------------- .../java/org/elasticsearch/common/UUIDs.java | 6 -- 2 files changed, 93 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java diff --git 
a/server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java deleted file mode 100644 index 74a08711042f7..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/LegacyTimeBasedUUIDGenerator.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common; - -import java.util.Base64; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * These are essentially flake ids, but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. - * For more information about flake ids, check out - * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ - */ - -class LegacyTimeBasedUUIDGenerator implements UUIDGenerator { - - // We only use bottom 3 bytes for the sequence number. Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips - // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); - - // Used to ensure clock moves forward: - private long lastTimestamp; - - private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); - - static { - assert SECURE_MUNGED_ADDRESS.length == 6; - } - - /** Puts the lower numberOfLongBytes from l into the array, starting index pos. */ - private static void putLong(byte[] array, long l, int pos, int numberOfLongBytes) { - for (int i=0; i>> (i*8)); - } - } - - @Override - public String getBase64UUID() { - final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; - long timestamp = System.currentTimeMillis(); - - synchronized (this) { - // Don't let timestamp go backwards, at least "on our watch" (while this JVM is running). We are still vulnerable if we are - // shut down, clock goes backwards, and we restart... 
for this we randomize the sequenceNumber on init to decrease chance of - // collision: - timestamp = Math.max(lastTimestamp, timestamp); - - if (sequenceId == 0) { - // Always force the clock to increment whenever sequence number is 0, in case we have a long time-slip backwards: - timestamp++; - } - - lastTimestamp = timestamp; - } - - final byte[] uuidBytes = new byte[15]; - - // Only use lower 6 bytes of the timestamp (this will suffice beyond the year 10000): - putLong(uuidBytes, timestamp, 0, 6); - - // MAC address adds 6 bytes: - System.arraycopy(SECURE_MUNGED_ADDRESS, 0, uuidBytes, 6, SECURE_MUNGED_ADDRESS.length); - - // Sequence number adds 3 bytes: - putLong(uuidBytes, sequenceId, 12, 3); - - assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length; - - return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index a6a314c2cccb0..46643a79da2e2 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -26,7 +26,6 @@ public class UUIDs { private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - private static final UUIDGenerator LEGACY_TIME_UUID_GENERATOR = new LegacyTimeBasedUUIDGenerator(); private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); /** Generates a time-based UUID (similar to Flake IDs), which is preferred when generating an ID to be indexed into a Lucene index as @@ -35,11 +34,6 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } - /** Legacy implementation of {@link #base64UUID()}, for pre 6.0 indices. 
*/ - public static String legacyBase64UUID() { - return LEGACY_TIME_UUID_GENERATOR.getBase64UUID(); - } - /** Returns a Base64 encoded version of a Version 4.0 compatible UUID as defined here: http://www.ietf.org/rfc/rfc4122.txt, using the * provided {@code Random} instance */ public static String randomBase64UUID(Random random) { From bd6c85b3ce36fcb469605a17887faf7cc289f6aa Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 18 May 2019 09:37:19 -0400 Subject: [PATCH 118/321] Cleanup AllocateDangledResponse (#42149) * Field `ack` is unused so it was removed along with the constructor argument * Added BwC logic to the serialization for now, once that has become unnecessary we can just use `org.elasticsearch.transport.TransportResponse.Empty` here --- .../gateway/LocalAllocateDangledIndices.java | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index b51d16dbc5116..48117eed2d56d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -89,9 +89,7 @@ public void allocateDangled(Collection indices, ActionListener() { @Override public AllocateDangledResponse read(StreamInput in) throws IOException { - final AllocateDangledResponse response = new AllocateDangledResponse(); - response.readFrom(in); - return response; + return new AllocateDangledResponse(in); } @Override @@ -198,7 +196,7 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { try { - channel.sendResponse(new AllocateDangledResponse(true)); + channel.sendResponse(new AllocateDangledResponse()); } catch (IOException e) { logger.warn("failed send response for allocating dangled", e); } @@ -243,25 +241,21 @@ public void writeTo(StreamOutput out) throws IOException { public static class AllocateDangledResponse extends TransportResponse { - private boolean ack; - - AllocateDangledResponse() { - } - - AllocateDangledResponse(boolean ack) { - this.ack = ack; + private AllocateDangledResponse(StreamInput in) throws IOException { + if (in.getVersion().before(Version.V_8_0_0)) { + in.readBoolean(); + } } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - ack = in.readBoolean(); + private AllocateDangledResponse() { } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeBoolean(ack); + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeBoolean(true); + } } } } From 3dbfe0339d5c555aab142f4be5378d3f7b5d6380 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Sun, 19 May 2019 08:47:40 -0400 Subject: [PATCH 119/321] [ML] Temporarily muting failing tests Muting a number of AutoDetectMemoryLimitIT tests to give CI a chance to settle before easing in required backend changes. 
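The AllocateDangledResponse cleanup in PATCH 118 above uses the standard wire-compatibility idiom: the reader consumes the obsolete field when talking to an old-version node, and the writer emits a placeholder for it, so mixed-version clusters keep reading a consistent stream. A stripped-down sketch of that idiom follows, using plain JDK streams and an invented version constant rather than the real Elasticsearch StreamInput/StreamOutput classes:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Hedged sketch of the BwC pattern; V_8_0_0 and the stream types are stand-ins,
    // not the actual Elasticsearch version id or serialization classes.
    class EmptyResponseSketch {
        static final int V_8_0_0 = 8_00_00_00; // illustrative version constant

        static void readFrom(DataInputStream in, int peerVersion) throws IOException {
            if (peerVersion < V_8_0_0) {
                in.readBoolean(); // swallow the removed `ack` flag that pre-8.0 nodes still send
            }
        }

        static void writeTo(DataOutputStream out, int peerVersion) throws IOException {
            if (peerVersion < V_8_0_0) {
                out.writeBoolean(true); // placeholder so pre-8.0 nodes can still parse the response
            }
        }
    }
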
relates elastic/ml-cpp#486 relates #42086 --- .../xpack/ml/integration/AutodetectMemoryLimitIT.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index 03860ea9ae044..2f00591420520 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -84,6 +84,7 @@ public void testTooManyPartitions() throws Exception { assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207") public void testTooManyByFields() throws Exception { Detector.Builder detector = new Detector.Builder("count", null); detector.setByFieldName("user"); @@ -129,6 +130,7 @@ public void testTooManyByFields() throws Exception { assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207") public void testTooManyByAndOverFields() throws Exception { Detector.Builder detector = new Detector.Builder("count", null); detector.setByFieldName("department"); @@ -178,6 +180,7 @@ public void testTooManyByAndOverFields() throws Exception { assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207") public void testManyDistinctOverFields() throws Exception { Detector.Builder detector = new Detector.Builder("sum", "value"); detector.setOverFieldName("user"); From 0729dc49cac9f36c72da16c177a7e3dd16cdf0da Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 19 May 2019 20:43:41 -0400 Subject: [PATCH 120/321] Minor improvement translog docs (#42184) Closes #42183 --- docs/reference/index-modules/translog.asciidoc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index 705fb81b09c8c..6821e583a79dd 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -29,13 +29,11 @@ The data in the translog is only persisted to disk when the translog is ++fsync++ed and committed. In the event of hardware failure, any data written since the previous translog commit will be lost. -By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds -if `index.translog.durability` is set to `async` or if set to `request` -(default) at the end of every <>, <>, -<>, or <> request. More precisely, if set -to `request`, Elasticsearch will only report success of an index, delete, +By default, `index.translog.durability` is set to `request` meaning that Elasticsearch will only report success of an index, delete, update, or bulk request to the client after the translog has been successfully -++fsync++ed and committed on the primary and on every allocated replica. +++fsync++ed and committed on the primary and on every allocated replica. 
If +`index.translog.durability` is set to `async` then Elasticsearch ++fsync++s +and commits the translog every `index.translog.sync_interval` (defaults to 5 seconds). The following <> per-index settings control the behaviour of the translog: From efbe58131b57082a3497a40e0911e0d3057ac6e1 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Mon, 20 May 2019 11:51:27 +1000 Subject: [PATCH 121/321] Do not refresh realm cache unless required (#42169) If there are no realms that depend on the native role mapping store, then changes should it should not perform any cache refresh. A refresh with an empty realm array will refresh all realms. This also fixes a spurious log warning that could occur if the role mapping store was notified that the security index was recovered before any realm were attached. Resolves: #35218 --- .../mapper/NativeRoleMappingStore.java | 10 +++- .../mapper/NativeRoleMappingStoreTests.java | 55 +++++++++++++------ 2 files changed, 45 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 1b6da7f68ca4e..bb98dddbe1ddf 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -329,7 +330,12 @@ public void onSecurityIndexStateChange(SecurityIndexManager.State previousState, } private void refreshRealms(ActionListener listener, Result result) { - String[] realmNames = this.realmsToRefresh.toArray(new String[realmsToRefresh.size()]); + if (realmsToRefresh.isEmpty()) { + listener.onResponse(result); + return; + } + + final String[] realmNames = this.realmsToRefresh.toArray(Strings.EMPTY_ARRAY); final SecurityClient securityClient = new SecurityClient(client); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, securityClient.prepareClearRealmCache().realms(realmNames).request(), @@ -340,7 +346,7 @@ private void refreshRealms(ActionListener listener, Result resu listener.onResponse(result); }, ex -> { - logger.warn("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)); + logger.warn(new ParameterizedMessage("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)), ex); listener.onFailure(ex); }), securityClient::clearRealmCache); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 6bb6e0c7b5854..3cca6cc4fd380 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -143,7 +143,7 @@ private 
SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { public void testCacheClearOnIndexHealthChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); - final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); int expectedInvalidation = 0; // existing to no longer present @@ -180,7 +180,7 @@ public void testCacheClearOnIndexHealthChange() { public void testCacheClearOnIndexOutOfDateChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); - final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); store.onSecurityIndexStateChange( new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null), @@ -193,40 +193,59 @@ public void testCacheClearOnIndexOutOfDateChange() { assertEquals(2, numInvalidation.get()); } - private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter) { + public void testCacheIsNotClearedIfNoRealmsAreAttached() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, false); + + final SecurityIndexManager.State noIndexState = dummyState(null); + final SecurityIndexManager.State greenIndexState = dummyState(ClusterHealthStatus.GREEN); + store.onSecurityIndexStateChange(noIndexState, greenIndexState); + assertEquals(0, numInvalidation.get()); + } + + private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter, boolean attachRealm) { final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); final ThreadPool threadPool = mock(ThreadPool.class); final ThreadContext threadContext = new ThreadContext(settings); when(threadPool.getThreadContext()).thenReturn(threadContext); + final String realmName = randomAlphaOfLengthBetween(4, 8); + final Client client = mock(Client.class); when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); doAnswer(invocationOnMock -> { + assertThat(invocationOnMock.getArguments(), Matchers.arrayWithSize(3)); + final ClearRealmCacheRequest request = (ClearRealmCacheRequest) invocationOnMock.getArguments()[1]; + assertThat(request.realms(), Matchers.arrayContaining(realmName)); + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; invalidationCounter.incrementAndGet(); listener.onResponse(new ClearRealmCacheResponse(new ClusterName("cluster"), Collections.emptyList(), Collections.emptyList())); return null; }).when(client).execute(eq(ClearRealmCacheAction.INSTANCE), any(ClearRealmCacheRequest.class), any(ActionListener.class)); - final Environment env = TestEnvironment.newEnvironment(settings); - final RealmConfig realmConfig = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", getTestName()), - settings, env, threadContext); - final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm(realmConfig, threadPool) { - @Override - protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { - listener.onResponse(AuthenticationResult.notHandled()); - } - - @Override - protected void doLookupUser(String username, ActionListener listener) { 
- listener.onResponse(null); - } - }; final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class), mock(ScriptService.class)); - store.refreshRealmOnChange(mockRealm); + + if (attachRealm) { + final Environment env = TestEnvironment.newEnvironment(settings); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("ldap", realmName); + final RealmConfig realmConfig = new RealmConfig(identifier, settings, env, threadContext); + final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm(realmConfig, threadPool) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + }; + store.refreshRealmOnChange(mockRealm); + } return store; } } From f765e21583ceab1b0ebf352eb61546114c509084 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 19 May 2019 22:05:52 -0400 Subject: [PATCH 122/321] Enable debug log in testRetentionLeasesSyncOnRecovery Relates #39105 --- .../java/org/elasticsearch/index/seqno/RetentionLeaseIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 92d31e305adc7..cb40a0726d42f 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -356,7 +356,7 @@ public void testRetentionLeasesBackgroundSyncWithSoftDeletesDisabled() throws Ex assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get()); } - @TestLogging(value = "org.elasticsearch.indices.recovery:trace") + @TestLogging(value = "org.elasticsearch.index:debug,org.elasticsearch.indices.recovery:trace") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); From 5fe1fda69258d94204c793e317e92bf0bd804d79 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Mon, 20 May 2019 14:22:28 +1000 Subject: [PATCH 123/321] Add cluster restart for security on basic (#41933) This performs a simple restart test to move a basic licensed cluster from no security (the default) to security & transport TLS enabled. 
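As a rough illustration of what the new test asserts, a standalone check against such a cluster could look like the sketch below, using the low-level REST client; the class name, host, and the substring check are illustrative assumptions, not part of this patch:

    import org.apache.http.HttpHost;
    import org.apache.http.util.EntityUtils;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    // Hypothetical probe, not part of this patch; the real assertions live in
    // EnableSecurityOnBasicLicenseIT below.
    public class SecurityUsageProbe {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // After restarting with xpack.security.enabled=true, the usage API should
                // report security as available and enabled, even on a basic license.
                Response response = client.performRequest(new Request("GET", "/_xpack/usage"));
                String body = EntityUtils.toString(response.getEntity());
                System.out.println(body.contains("\"security\"") && body.contains("\"enabled\":true"));
            }
        }
    }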
--- .../qa/basic-enable-security/build.gradle | 68 ++++++++ .../EnableSecurityOnBasicLicenseIT.java | 156 ++++++++++++++++++ .../src/test/resources/roles.yml | 14 ++ .../src/test/resources/ssl/README.asciidoc | 30 ++++ .../src/test/resources/ssl/ca.crt | 20 +++ .../src/test/resources/ssl/ca.key | 30 ++++ .../src/test/resources/ssl/transport.crt | 22 +++ .../src/test/resources/ssl/transport.key | 30 ++++ 8 files changed, 370 insertions(+) create mode 100644 x-pack/plugin/security/qa/basic-enable-security/build.gradle create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt create mode 100644 x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle new file mode 100644 index 0000000000000..a21e3c68d3fc4 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle @@ -0,0 +1,68 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +task integTestNoSecurity(type: RestIntegTestTask) { + description = "Run tests against a cluster that doesn't have security" +} +tasks.getByName("integTestNoSecurityRunner").configure { + systemProperty 'tests.has_security', 'false' +} +check.dependsOn(integTestNoSecurity) + +task integTestSecurity(type: RestIntegTestTask) { + dependsOn integTestNoSecurity + description = "Run tests against a cluster that has security" +} +tasks.getByName("integTestSecurityRunner").configure { + systemProperty 'tests.has_security', 'true' +} +check.dependsOn(integTestSecurity) + +configure(extensions.findByName("integTestNoSecurityCluster")) { + clusterName = "enable-security-on-basic" + numNodes = 2 + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'false' +} + +Task noSecurityTest = tasks.findByName("integTestNoSecurity") +configure(extensions.findByName("integTestSecurityCluster")) { + clusterName = "basic-license" + numNodes = 2 + dataDir = { nodeNum -> noSecurityTest.nodes[nodeNum].dataDir } + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.authc.anonymous.roles', 'anonymous' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 
'xpack.security.transport.ssl.certificate', 'transport.crt' + setting 'xpack.security.transport.ssl.key', 'transport.key' + setting 'xpack.security.transport.ssl.key_passphrase', 'transport-password' + setting 'xpack.security.transport.ssl.certificate_authorities', 'ca.crt' + + extraConfigFile 'transport.key', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.key').toFile() + extraConfigFile 'transport.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.crt').toFile() + extraConfigFile 'ca.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/ca.crt').toFile() + + setupCommand 'setupAdminUser', + 'bin/elasticsearch-users', 'useradd', 'admin_user', '-p', 'admin-password', '-r', 'superuser' + setupCommand 'setupTestUser' , + 'bin/elasticsearch-users', 'useradd', 'security_test_user', '-p', 'security-test-password', '-r', 'security_test_role' + extraConfigFile 'roles.yml', project.projectDir.toPath().resolve('src/test/resources/roles.yml').toFile() +} + +integTest.enabled = false diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java b/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java new file mode 100644 index 0000000000000..fa64a89f2f633 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.security.authc.InternalRealms; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class EnableSecurityOnBasicLicenseIT extends ESRestTestCase { + + private static boolean securityEnabled; + + @BeforeClass + public static void checkTestMode() { + final String hasSecurity = System.getProperty("tests.has_security"); + securityEnabled = Booleans.parseBoolean(hasSecurity); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("security_test_user", new SecureString("security-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + // If this is the first run (security not yet enabled), then don't clean up afterwards because we want to test restart with data + return securityEnabled == false; + } + + public void testSecuritySetup() throws Exception { + logger.info("Security status: {}", securityEnabled); + logger.info("Cluster:\n{}", getClusterInfo()); + logger.info("Indices:\n{}", getIndices()); + checkBasicLicenseType(); + + checkSecurityStatus(securityEnabled); + if (securityEnabled) { + checkAuthentication(); + } + + checkAllowedWrite("index_allowed"); + // Security runs second, and should see the doc from the first (non-security) run + final int expectedIndexCount = securityEnabled ? 
2 : 1; + checkIndexCount("index_allowed", expectedIndexCount); + + final String otherIndex = "index_" + randomAlphaOfLengthBetween(2, 6).toLowerCase(Locale.ROOT); + if (securityEnabled) { + checkDeniedWrite(otherIndex); + } else { + checkAllowedWrite(otherIndex); + } + } + + private String getClusterInfo() throws IOException { + Map info = getAsMap("/"); + assertThat(info, notNullValue()); + return info.toString(); + } + + private String getIndices() throws IOException { + final Request request = new Request("GET", "/_cat/indices"); + Response response = client().performRequest(request); + return EntityUtils.toString(response.getEntity()); + } + + private void checkBasicLicenseType() throws IOException { + Map license = getAsMap("/_license"); + assertThat(license, notNullValue()); + assertThat(ObjectPath.evaluate(license, "license.type"), equalTo("basic")); + } + + private void checkSecurityStatus(boolean expectEnabled) throws IOException { + Map usage = getAsMap("/_xpack/usage"); + assertThat(usage, notNullValue()); + assertThat(ObjectPath.evaluate(usage, "security.available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.enabled"), equalTo(expectEnabled)); + if (expectEnabled) { + for (String realm : Arrays.asList("file", "native")) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".enabled"), equalTo(true)); + } + for (String realm : InternalRealms.getConfigurableRealmsTypes()) { + if (realm.equals("file") == false && realm.equals("native") == false) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(false)); + assertThat(ObjectPath.evaluate(usage, "security.realms." 
+ realm + ".enabled"), equalTo(false)); + } + } + } + } + + private void checkAuthentication() throws IOException { + final Map auth = getAsMap("/_security/_authenticate"); + // From file realm, configured in build.gradle + assertThat(ObjectPath.evaluate(auth, "username"), equalTo("security_test_user")); + assertThat(ObjectPath.evaluate(auth, "roles"), contains("security_test_role")); + } + + private void checkAllowedWrite(String indexName) throws IOException { + final Request request = new Request("POST", "/" + indexName + "/_doc"); + request.setJsonEntity("{ \"key\" : \"value\" }"); + Response response = client().performRequest(request); + final Map result = entityAsMap(response); + assertThat(ObjectPath.evaluate(result, "_index"), equalTo(indexName)); + assertThat(ObjectPath.evaluate(result, "result"), equalTo("created")); + } + + private void checkDeniedWrite(String indexName) { + final Request request = new Request("POST", "/" + indexName + "/_doc"); + request.setJsonEntity("{ \"key\" : \"value\" }"); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("unauthorized for user [security_test_user]")); + } + + private void checkIndexCount(String indexName, int expectedCount) throws IOException { + final Request request = new Request("POST", "/" + indexName + "/_refresh"); + adminClient().performRequest(request); + + final Map result = getAsMap("/" + indexName + "/_count"); + assertThat(ObjectPath.evaluate(result, "count"), equalTo(expectedCount)); + } +} diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml new file mode 100644 index 0000000000000..eb6c3ec45786b --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml @@ -0,0 +1,14 @@ +# A basic role that is used to test security +security_test_role: + cluster: + - monitor + - "cluster:admin/xpack/license/*" + indices: + - names: [ "index_allowed" ] + privileges: [ "read", "write", "create_index" ] + - names: [ "*" ] + privileges: [ "monitor" ] + +anonymous: + cluster: + - monitor \ No newline at end of file diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc new file mode 100644 index 0000000000000..b3729f42d17b0 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc @@ -0,0 +1,30 @@ += Keystore Details + +This document details the steps used to create the certificate and keystore files in this directory. 
+ +== Instructions on generating certificates +The certificates in this directory have been generated using elasticsearch-certutil (7.0.0 SNAPSHOT) + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --out=ca.zip --pass="ca-password" --days=3500 +unzip ca.zip +mv ca/ca.* ./ + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=transport --out=transport.zip --pass="transport-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip transport.zip +mv transport/transport.* ./ + +rm transport.zip +rmdir transport +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt new file mode 100644 index 0000000000000..95068217a612a --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVAL0RCyWTbBDd2ntuWoqRwW0IE9+9MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDQzMDAzNTQwN1oXDTI4MTEyODAzNTQwN1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDA4VwADiyl+Xl15D27gtpS +TXZfHt40MUx12FY0MEd3A3hU+Fp4PaLE2ejECx04yrq8Rfc0Yltux/Fc5zE98XM8 +dY4j0QN/e6C/f0mrBI0KaJ25nv0MWFvoqS/D3vWvDFLUP1a3OZICWWoBDG+zCHe5 +Aq0qwge+FU9IUc7G2WPJeUp4e0+EzLxFInls3rTX1xkyq8Q6PT3gi0RZKvHqIudL +DAXDVEGWNxEX9KwQ1nMtRkDZICx/W665kZiBD4XC3WuEkYlDL1ISVw3cmsbYdhb4 +IusIK5zNERi4ewTgDDxic8TbRpkQW189/M3IglrQipH5ixfF6oNSyoRVAa3KZqj5 +AgMBAAGjUzBRMB0GA1UdDgQWBBRI4mOaeunbu60GfjWTpHcvhb6/YTAfBgNVHSME +GDAWgBRI4mOaeunbu60GfjWTpHcvhb6/YTAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQCUOXddlGoU+Ni85D0cRjYYxyx8a5Rwngp+kztttT/5l3Ch +5JMZyl/xcaTryh37BG3+NuqKR1zHtcLpq/+xaCrwBQ8glJofF+1n9w4zBL9nrH5c +O5NgG7+u/sfB+xdqMVdoBBqfm1Roq7O1T/kBXis1+5ZtBlj+7WIKeWWTZGLTrHV+ +MW5RDOmMoLkqT5qzpR9Yf7UChPVrvKGs4Kd+fYJeb0R5W6mvZQ6/FrsLwAWLC2Q1 +rW1u4zIkO0ih5qd52dl/73u7SWqzWxPy1ynwqJefD4AA0uaJYtMlXHK2vYjutHvY +K7301gzc5fueqo1YMmPgsjjsj+ErR1t0ve7faOBy +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key new file mode 100644 index 0000000000000..a6de1f9958d32 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,0F6B57727499DA47 + +OmK77UnFtk/zNEbNTxNJz73D2XWFDWLyHCDZPEXkX55vch/pXkkfVbWbPBFv35nA +LKni0j802Qnc1D4V3BUSmVWHk9SfjI5nlcDkSELbgCOpuZkf6Bmk8FgLfV42BFxn +lAiY+oBB4VV+rxA+HUV6CiWWrTgSjkvFyXCBZzcTEPdF2ifWerjsWKOjQZJtmvMX +J5DhYCCp1/n4R/OQpYxQiOqJdUxbKx4k0h139ySK2PggdL17w1a7AuQnHwJO3+ic +1IntPKD/ZhpAPPzq8A5R5jZyvrSj9Dgv94PXAQ5xTZWnZd2nuJtbkrYJ47pBR3Re 
+R2aZdF/N8ljG1TYHuJXdiL3A80Y3AS00TFNgSAZKSz5Ktt6zI2EAZu9xdHd8EfUm +m3qJmfce9P9cCBzo7DLGHwRMfu9hEFWN9dRD8KWNcB+ahQ1/jItzi25yZM6vD6+S +ZVUzegybeYlMwPks3YObX9IdUSwAd9F76SVwHCsziKQW4RfETaShG/oRNqq04nqA +E//KUl5bfTuv8jumyMlg6iiqIDQAUvzI74mWe2lIy6rglm2rR39SN4NxSrnTwoz4 +KAf+kHWJVyxFqEYs+dqboRWpRfQac3+iYoIlZFob/nRhNyKnccTkHtjh7+1C8CXI +sYXhuJZLCoiXh990M9t1ct0hqfWLNALlEsJesfRG8/fvi+LZd9i3fyCjrM+z96/G +/2zQzdga4bOs3ZEBluYFYkhHRJw1rAF3LTcWYvjP0gjZYVQki7AsLb0me1selS6O +P1bXaLaSUvMsAVO0wOtHMXAoBgEybP4+OonLiMScjdQZ2KRQ8L8OwzuGt0yguPRy +7wQv4NrH8LQu+X7tlQox28kascZUNHxORbh9M/wWx/2htw88uXWb5vxbDe30Rras +mTg0Gxky/88ZWvYxr7PlhBRrrfkJQ9sF/RyygUFhpQaXTwspkpF+MZv+1X6ROHqR +OueSa606FrptZ5n4RRPjq0hVZQgWKMAlIxNSum+gFn/Z7Q9I6gKrGFxjkD65L1kK +BbvbHAomiTyphrMtBRP52VqsFr4NxCWzxr/ZSlwaxTEid2vYg3zm7ls4dHYjUiNR +cs/JZJTkXn2aVaILSQkr9/I0eOOH9t/APSXHY8urQuYsDdmOOL7J2tlh3w1ivP8A +vVeomdUr2jgn53pBzbaLlTfsZ9+UneuLcztLfqN+BydQq1bKWvn2j3GvUkmhE//M ++fpo+uGlslMLh8rjtRH1y9rtCKhLgIxLO4U/ZJksFcJAqF3mR+Xxkrf82LUrAg8x +Oj++3QhOJE7f+vKog8b0gGrySSwzII2Ar7KiJDVJaZpmbbXToBlcC7xoksN3Ra0E +15WxKBSRqb7gi2+ml02rwtFMzq93H05Uoa9mG8uf1QH8t/+o6fniFx5N5kKWmPMy +shXjaYg7NzEBAkxI4VO41faMxEj/CUV0klQDPbnAsTCrcYu7CS2lml3e0zVf6RB8 +plXee99DiWpHZTRoGzpInK3DpnGRP1Frgl1KyhT+HayFZeYSMHfVSFPk3CKKmtEp +r+J/SrpGnEx0NKK3f+MxflZfnMIvgjGxkHdgSaDpz9iTjveq176Bq1GmNLALotOq +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt new file mode 100644 index 0000000000000..8ffb02e3d5794 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIVAOSHUsKiRx+ekWEEmfI2Q2q3B5hoMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDQzMDAzNTU0NloXDTI4MTEyODAzNTU0NlowFDESMBAG +A1UEAxMJdHJhbnNwb3J0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +wBaoGJ9vv9yFxCOg24CsVfwSThOPnea8oujexGZYDgKkCdtcVn03tlyomjOra/dL +PJ0zOvUyktTxv022VQNhkJ/PO+w/NKpHBHaAVZE0o2zvUf8xQqXoHw0S6rAhurs5 +50r8QRkh1Z3ky3uOcFs0pXYCR/2ZVmQNSBhqmhUSK5y0VURot1MtPMw1SeqyabZQ +upDTJ6um/zk2LalfChKJ3vGQGEW7AGfv10eIWSmqQx6rLWAGO4MDelbZhUUr5iFc +D4fW0/MNUXJHTBO5Dyq6n63Wsm0jTYK72bSVw8LZS+uabQCtcHtKUZh38uUEUCjp +MDVY7YmDv0i8qx/MvWasbwIDAQABo4HgMIHdMB0GA1UdDgQWBBQwoESvk9jbbTax +/+c5MCAFEvWW5TAfBgNVHSMEGDAWgBRI4mOaeunbu60GfjWTpHcvhb6/YTCBjwYD +VR0RBIGHMIGEgglsb2NhbGhvc3SCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/ +AAABhxAAAAAAAAAAAAAAAAAAAAABggpsb2NhbGhvc3Q0ggpsb2NhbGhvc3Q2ghVs +b2NhbGhvc3QubG9jYWxkb21haW6CF2xvY2FsaG9zdDQubG9jYWxkb21haW40MAkG +A1UdEwQCMAAwDQYJKoZIhvcNAQELBQADggEBAIQ8/PLfsZ1eKOWW74a4h/Uh5eh8 +u9Led1v+U9tszmULN8JoYSEgyql6zy2pJOuIVLwI9cUvrcypUSDL53NmWhTGAjEL +jbww/G1cngBh5cBzAPq3lRL2lwc8j3ZZ16I1eNyWastvBDdtANlDArCUamZoboBm +HE/jrssC9DOQhxAraiitH3YqjquqztEp1zIuqRI0qYTDFNPzyfyXIyCFIT+3eVI5 +22MqjFL+9IDuoET+VU1i22LhF32TEPotz2hfZTFddql0V1IOJQuVkDkQGFvaJMFy +Xw7d4orV3sxzQYd7muCoaao7g/F675KqpZiiVHqKxTOLafF/MPcfLhH6xZk= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key new file mode 100644 index 0000000000000..f540e17202492 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,0B9EFA0829A750FB + 
+NCrPD7gkQ4Jr5/xIiohWILW3nO/WmNjApqOIc5g/wX/xJpk/554f8zCZ8dUD0D2E +ZW+z7Yj8GWKB0E6+hQZ+3ZUHLYASYSpSDVjg8UaaCxQyoVcUhshahFprqlzgU/An +Er8TbrGvhH0VmNlcQhaImqCOk41Hf8gjrxrtoLKbk3DfTk/Uuv4Jlsz4X+oSBVZN +fezIN70IZvGLKu7O3T9DeVLV1bLL6hNGIXnYe+FzLomMck2UoFv6uGS1VyFIGNf0 +ly80NGgdWTGxzLmiiGCgm5gbqbIehMsei1CC3jZIcfgfGyp4NVvF4HxFxZLTR3kY +YqzBWta/PoY6XXOlLFZupYt/YMt9hU6It9xdudPyNxwSuFXU66Fc08Ljj151iyhv +Ggf88jo9xSVvKOlqqHN6dY/xo9CfzTyuldG4jsKVHgGosSGghksjZ+PpHc7Mo5aP +S/UofhQgApJgU30TQPiQuJ+my/h9CiJyIgP7HnZtltwxg1k3dj+LxlpRKvjTOfuc +epOFmPeIdPkrQDir0j9+h+yoMgeqoT2unUYXw/qx5SVQxB5ckajLmJkUJPej9U3O +wASqNcWCTBEkGt102RU8o6lywdzBvfTB7gegR6oDvRfaxHOiUrRT/IwgszRfIdoC +fZa7Pb9pUuR3oY4uduDYgIKnxJhhQF2ERVXsfQeyxdiHEXvRnBFoAhoDjO8rWv07 +xiFPVMCAqXPImmdI34QezuzV2MUIVlKyeovbf+Kjv/Uat3zTj5FbmyVHcmPXpTY7 +t5iTQG+nQwz6UGcM5lF40EWrRdCzHEXNszwEY3Oz8D5rgBa6kxHYjcG9rzbTGlk2 +gsKdKA0am0hnCCJdTxbK5AkDcCWn/eclw0RPpbhFv5anvHTJ5WAWE7ZaACRuSfvy +UbNRGiWo4cNcR7+PGgV5184zjwJOql1mz+I79tlpxtK/FazP61WAYKOeEx1paKXX +syq+WDWgoZu/RzKDyTu10NUgq9J/IXDBn8/JjOVPCmPhMMLxNdoUhMfO4Ij9+3Jv +mH6ZaU6E+NZuc5N4Ivws42PwNY9FoyuLLgMBbezjhepQrDveHUK5v0weWqEapZ7Z +4KkFAeK7pjuItn5Of+233cp9Y68G8NrwMLQzI23kebNJwwzUMf3DnUJCXiy3PvrF +WpA0Q6/FspJgG3x2AXKo2QsHxydW+4w4pkawS9TCl0E03D7V6Gf17/HOxPDSH972 ++Yzzv8IkaOw5g+paeX9+tHjDFaxuvKiFyn/J7xYZAAQUoa2uQu440RakE73qLO34 +wtWdRzvIYitwLNJSfSojQDNoXuv8eyI/hP573cs6pmbheKXG1XKsWfpfj8sI7OkH +AdjRyeToSKbZ8yCn2vp0jyaRocOucu5oo7c0v+IocWOgdw+913EToJ6G3ck1heVR +b/U04VqKkXowO1YK7xDBAalMxyWq40spIKCC8HBBlng3vfUKqF46q9bMpesXnwPr +/00JfDVhFbqkJbqB8UYpjs9MN+vV5A7lsYbObom4pV25FSnwNSyxK0bhWGfZgutI +pjeQDkvHNG606AsqLz6SmIJP/GBBSMwvT3PGMPOO5XcayKeK3cbOQYJ0Yh7Muoqe +-----END RSA PRIVATE KEY----- From f7a4f92719fea344916957e12176886fbb25e8e2 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 20 May 2019 04:02:01 -0400 Subject: [PATCH 124/321] Fix random failure in SearchRequestTests#testRandomVersionSerialization (#42069) This commit fixes a test bug that ends up comparing the result of two consecutive calls to System.currentTimeMillis that can be different on slow CIs. Closes #42064 --- .../org/elasticsearch/action/search/SearchRequest.java | 9 ++++++++- .../elasticsearch/action/search/SearchRequestTests.java | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 18c2f529a711d..6b641906d2e32 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -286,12 +286,19 @@ boolean isFinalReduce() { * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. - * */ long getOrCreateAbsoluteStartMillis() { return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis; } + /** + * Returns the provided absoluteStartMillis when created through {@link #crossClusterSearch} and + * -1 otherwise. + */ + long getAbsoluteStartMillis() { + return absoluteStartMillis; + } + /** * Sets the indices the search will be executed on. 
*/
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
index a0c4626e9543f..8f1d89a37daaa 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
@@ -80,7 +80,7 @@ public void testRandomVersionSerialization() throws IOException {
         SearchRequest deserializedRequest = copyWriteable(searchRequest, namedWriteableRegistry, SearchRequest::new, version);
         assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips());
         assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias());
-        assertEquals(searchRequest.getOrCreateAbsoluteStartMillis(), deserializedRequest.getOrCreateAbsoluteStartMillis());
+        assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis());
         assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce());
     }

From 307bc17f05163a553df1aede6dae439d5516dc50 Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Mon, 20 May 2019 12:26:33 +0300
Subject: [PATCH 125/321] Hash token values for storage (#41792)

This commit changes how access tokens and refresh tokens are stored in
the tokens index. Access token values are now hashed before being
stored in the id field of the `user_token` and before becoming part of
the token document id. Refresh token values are hashed before being
stored in the token field of the `refresh_token`. The tokens are hashed
without a salt value since these are v4 UUID values that have enough
entropy themselves. Both rainbow table attacks and offline brute force
attacks are impractical.

As a side effect of this change and in order to support multiple
concurrent refreshes as introduced in #39631, upon refreshing an
<access token, refresh token> pair, the superseding access token and
refresh token values are stored in the superseded token doc, encrypted
with a key that is derived from the superseded refresh token. As such,
subsequent requests to refresh the same token in the predefined time
window will return the same superseding access token and refresh token
values, without hitting the tokens index (as this only stores hashes of
the token values). AES in GCM mode is used for encrypting the token
values and the key derivation from the superseded refresh token uses a
small number of iterations as it needs to be quick.

For backwards compatibility reasons, the new behavior is only enabled
when all nodes in a cluster are in the required version so that old
nodes can cope with the token values in a mixed cluster during a
rolling upgrade.
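For illustration, the hashing scheme described above amounts to the
minimal sketch below (the patch itself routes this through a new
unsalted SHA256 entry in Hasher, shown in the diff); the class and
method here are illustrative stand-ins, not the patch's own code:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Base64;

    // Hypothetical sketch of hashing a token value before storage.
    final class TokenHashingSketch {
        // No salt is needed: a v4 UUID already carries 122 bits of randomness,
        // which makes rainbow table and offline brute force attacks impractical.
        static String hashTokenString(String token) throws NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            byte[] digest = md.digest(token.getBytes(StandardCharsets.UTF_8));
            // A 32-byte digest Base64-encodes to 44 characters, matching the
            // HASHED_TOKEN_LENGTH constant introduced by this patch.
            return Base64.getEncoder().encodeToString(digest);
        }
    }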
--- .../core/security/authc/support/Hasher.java | 18 + .../resources/security-index-template-7.json | 15 +- .../security-tokens-index-template-7.json | 15 +- ...nsportOpenIdConnectAuthenticateAction.java | 6 +- .../saml/TransportSamlAuthenticateAction.java | 3 +- .../token/TransportCreateTokenAction.java | 3 +- .../token/TransportRefreshTokenAction.java | 4 +- .../xpack/security/authc/TokenService.java | 510 +++++++++++------- .../xpack/security/authc/UserToken.java | 2 +- ...ansportOpenIdConnectLogoutActionTests.java | 19 +- ...sportSamlInvalidateSessionActionTests.java | 33 +- .../saml/TransportSamlLogoutActionTests.java | 22 +- .../authc/AuthenticationServiceTests.java | 20 +- .../security/authc/TokenServiceTests.java | 325 ++++++----- .../security/authc/support/HasherTests.java | 4 + 15 files changed, 627 insertions(+), 372 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index 492622b2c519c..28f263748135f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -351,6 +351,24 @@ public boolean verify(SecureString text, char[] hash) { return CharArrays.constantTimeEquals(computedHash, new String(saltAndHash, 12, saltAndHash.length - 12)); } }, + /* + * Unsalted SHA-256 , not suited for password storage. + */ + SHA256() { + @Override + public char[] hash(SecureString text) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + return Base64.getEncoder().encodeToString(md.digest()).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + return CharArrays.constantTimeEquals(Base64.getEncoder().encodeToString(md.digest()).toCharArray(), hash); + } + }, NOOP() { @Override diff --git a/x-pack/plugin/core/src/main/resources/security-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-index-template-7.json index ebf6d073cd8a6..dae6462b7a6f0 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template-7.json @@ -213,8 +213,19 @@ "type": "date", "format": "epoch_millis" }, - "superseded_by": { - "type": "keyword" + "superseding": { + "type": "object", + "properties": { + "encrypted_tokens": { + "type": "binary" + }, + "encryption_iv": { + "type": "binary" + }, + "encryption_salt": { + "type": "binary" + } + } }, "invalidated" : { "type" : "boolean" diff --git a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json index e7450d0be9c28..312d9ff9e3f58 100644 --- a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json @@ -35,8 +35,19 @@ "type": "date", "format": "epoch_millis" }, - "superseded_by": { - "type": "keyword" + "superseding": { + "type": "object", + "properties": { + "encrypted_tokens": { + "type": "binary" + }, + "encryption_iv": { + "type": "binary" + }, + "encryption_salt": { + "type": "binary" + } + } }, "invalidated" : { "type" : "boolean" diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java index 1b4aff064a0c3..4bab16cf92115 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java @@ -7,6 +7,8 @@ import com.nimbusds.oauth2.sdk.id.State; import com.nimbusds.openid.connect.sdk.Nonce; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -36,6 +38,7 @@ public class TransportOpenIdConnectAuthenticateAction private final ThreadPool threadPool; private final AuthenticationService authenticationService; private final TokenService tokenService; + private static final Logger logger = LogManager.getLogger(TransportOpenIdConnectAuthenticateAction.class); @Inject public TransportOpenIdConnectAuthenticateAction(ThreadPool threadPool, TransportService transportService, @@ -67,9 +70,8 @@ protected void doExecute(Task task, OpenIdConnectAuthenticateRequest request, .get(OpenIdConnectRealm.CONTEXT_TOKEN_DATA); tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMetadata, true, ActionListener.wrap(tuple -> { - final String tokenString = tokenService.getAccessTokenAsString(tuple.v1()); final TimeValue expiresIn = tokenService.getExpirationDelay(); - listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tokenString, + listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tuple.v1(), tuple.v2(), expiresIn)); }, listener::onFailure)); }, e -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index 6b61742eed262..96eec7e8fd6c7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -63,10 +63,9 @@ protected void doExecute(Task task, SamlAuthenticateRequest request, ActionListe final Map tokenMeta = (Map) result.getMetadata().get(SamlRealm.CONTEXT_TOKEN_DATA); tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMeta, true, ActionListener.wrap(tuple -> { - final String tokenString = tokenService.getAccessTokenAsString(tuple.v1()); final TimeValue expiresIn = tokenService.getExpirationDelay(); listener.onResponse( - new SamlAuthenticateResponse(authentication.getUser().principal(), tokenString, tuple.v2(), expiresIn)); + new SamlAuthenticateResponse(authentication.getUser().principal(), tuple.v1(), tuple.v2(), expiresIn)); }, listener::onFailure)); }, e -> { logger.debug(() -> new ParameterizedMessage("SamlToken [{}] could not be authenticated", saml), e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java index 4b648d5ed4bc0..65456ccd2af51 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -88,9 +88,8 @@ private void createToken(CreateTokenRequest request, Authentication authenticati boolean includeRefreshToken, ActionListener listener) { tokenService.createOAuth2Tokens(authentication, originatingAuth, Collections.emptyMap(), includeRefreshToken, ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getAccessTokenAsString(tuple.v1()); final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, + final CreateTokenResponse response = new CreateTokenResponse(tuple.v1(), tokenService.getExpirationDelay(), scope, tuple.v2()); listener.onResponse(response); }, listener::onFailure)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java index 71aeb64bc4276..5c161d889cfb1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java @@ -31,11 +31,9 @@ public TransportRefreshTokenAction(TransportService transportService, ActionFilt @Override protected void doExecute(Task task, CreateTokenRequest request, ActionListener listener) { tokenService.refreshToken(request.getRefreshToken(), ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getAccessTokenAsString(tuple.v1()); final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = - new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + new CreateTokenResponse(tuple.v1(), tokenService.getExpirationDelay(), scope, tuple.v2()); listener.onResponse(response); }, listener::onFailure)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 6f96c9bf7dd88..ec5086201c68e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -86,6 +86,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; import org.elasticsearch.xpack.core.security.authc.KeyAndTimestamp; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.TokensInvalidationResult; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -157,11 +158,12 @@ public final class TokenService { * Cheat Sheet and the * NIST Digital Identity Guidelines */ - private static final int ITERATIONS = 100000; + static final int TOKEN_SERVICE_KEY_ITERATIONS = 100000; + static final int 
TOKENS_ENCRYPTION_KEY_ITERATIONS = 1024; private static final String KDF_ALGORITHM = "PBKDF2withHMACSHA512"; - private static final int SALT_BYTES = 32; + static final int SALT_BYTES = 32; private static final int KEY_BYTES = 64; - private static final int IV_BYTES = 12; + static final int IV_BYTES = 12; private static final int VERSION_BYTES = 4; private static final String ENCRYPTION_CIPHER = "AES/GCM/NoPadding"; private static final String EXPIRED_TOKEN_WWW_AUTH_VALUE = "Bearer realm=\"" + XPackField.SECURITY + @@ -179,14 +181,18 @@ public final class TokenService { TimeValue.MINUS_ONE, Property.NodeScope); static final String TOKEN_DOC_TYPE = "token"; + private static final int HASHED_TOKEN_LENGTH = 44; + // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars + private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; - static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + static final int LEGACY_MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; + static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); + static final Version VERSION_HASHED_TOKENS = Version.V_8_0_0; static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_2_0; static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_2_0; static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_2_0; - // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars - private static final int TOKEN_ID_LENGTH = 22; private static final Logger logger = LogManager.getLogger(TokenService.class); private final SecureRandom secureRandom = new SecureRandom(); @@ -235,31 +241,71 @@ public TokenService(Settings settings, Clock clock, Client client, XPackLicenseS } /** - * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with an - * auto-generated token document id. The created tokens are stored in the security index. + * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with + * auto-generated values. The created tokens are stored in the security index for versions up to + * {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a specific security tokens index for later versions. 
*/ - public void createOAuth2Tokens(Authentication authentication, Authentication originatingClientAuth, - Map metadata, boolean includeRefreshToken, - ActionListener> listener) { + public void createOAuth2Tokens(Authentication authentication, Authentication originatingClientAuth, Map metadata, + boolean includeRefreshToken, ActionListener> listener) { // the created token is compatible with the oldest node version in the cluster final Version tokenVersion = getTokenVersionCompatibility(); // tokens moved to a separate index in newer versions final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); // the id of the created tokens ought be unguessable - final String userTokenId = UUIDs.randomBase64UUID(); - createOAuth2Tokens(userTokenId, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, includeRefreshToken, - listener); + final String accessToken = UUIDs.randomBase64UUID(); + final String refreshToken = includeRefreshToken ? UUIDs.randomBase64UUID() : null; + createOAuth2Tokens(accessToken, refreshToken, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, listener); } /** - * Create an access token and optionally a refresh token as well, based on the provided authentication and metadata, with the given - * token document id. The created tokens are be stored in the security index. + * Creates an access token and optionally a refresh token as well from predefined values, based on the provided authentication and + * metadata. The created tokens are stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a + * specific security tokens index for later versions. + */ + //public for testing + public void createOAuth2Tokens(String accessToken, String refreshToken, Authentication authentication, + Authentication originatingClientAuth, + Map metadata, ActionListener> listener) { + // the created token is compatible with the oldest node version in the cluster + final Version tokenVersion = getTokenVersionCompatibility(); + // tokens moved to a separate index in newer versions + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); + createOAuth2Tokens(accessToken, refreshToken, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, listener); + } + + /** + * Create an access token and optionally a refresh token as well from predefined values, based on the provided authentication and + * metadata. + * + * @param accessToken The predefined seed value for the access token. This will then be + *
+     *                    <ul>
+     *                      <li>Encrypted before stored for versions before {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Stored in a specific security tokens index for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs</li>
+     *                    </ul>
+     * @param refreshToken The predefined seed value for the refresh token. This will then be
+     *                    <ul>
+     *                      <li>Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Stored in a specific security tokens index for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                      <li>Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs for
+     *                      versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}</li>
+     *                    </ul>
+ * @param tokenVersion The version of the nodes with which these tokens will be compatible. + * @param tokensIndex The security tokens index + * @param authentication The authentication object representing the user for which the tokens are created + * @param originatingClientAuth The authentication object representing the client that called the related API + * @param metadata A map with metadata to be stored in the token document + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client */ - private void createOAuth2Tokens(String userTokenId, Version tokenVersion, SecurityIndexManager tokensIndex, + private void createOAuth2Tokens(String accessToken, String refreshToken, Version tokenVersion, SecurityIndexManager tokensIndex, Authentication authentication, Authentication originatingClientAuth, Map metadata, - boolean includeRefreshToken, ActionListener> listener) { - assert userTokenId.length() == TOKEN_ID_LENGTH : "We assume token ids have a fixed length for nodes of a certain version." - + " When changing the token length, be careful that the inferences about its length still hold."; + ActionListener> listener) { + assert accessToken.length() == TOKEN_LENGTH : "We assume token ids have a fixed length for nodes of a certain version." + + " When changing the token length, be careful that the inferences about its length still hold."; ensureEnabled(); if (authentication == null) { listener.onFailure(traceLog("create token", new IllegalArgumentException("authentication must be provided"))); @@ -269,10 +315,19 @@ private void createOAuth2Tokens(String userTokenId, Version tokenVersion, Securi } else { final Authentication tokenAuth = new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), authentication.getLookedUpBy(), tokenVersion, AuthenticationType.TOKEN, authentication.getMetadata()); - final UserToken userToken = new UserToken(userTokenId, tokenVersion, tokenAuth, getExpirationTime(), metadata); - final String plainRefreshToken = includeRefreshToken ? UUIDs.randomBase64UUID() : null; - final BytesReference tokenDocument = createTokenDocument(userToken, plainRefreshToken, originatingClientAuth); - final String documentId = getTokenDocumentId(userToken); + final String storedAccessToken; + final String storedRefreshToken; + if (tokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + storedAccessToken = hashTokenString(accessToken); + storedRefreshToken = (null == refreshToken) ? 
null : hashTokenString(refreshToken); + } else { + storedAccessToken = accessToken; + storedRefreshToken = refreshToken; + } + final UserToken userToken = new UserToken(storedAccessToken, tokenVersion, tokenAuth, getExpirationTime(), metadata); + final BytesReference tokenDocument = createTokenDocument(userToken, storedRefreshToken, originatingClientAuth); + final String documentId = getTokenDocumentId(storedAccessToken); + final IndexRequest indexTokenRequest = client.prepareIndex(tokensIndex.aliasName(), SINGLE_MAPPING_NAME, documentId) .setOpType(OpType.CREATE) .setSource(tokenDocument, XContentType.JSON) @@ -283,15 +338,17 @@ private void createOAuth2Tokens(String userTokenId, Version tokenVersion, Securi () -> executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, indexTokenRequest, ActionListener.wrap(indexResponse -> { if (indexResponse.getResult() == Result.CREATED) { + final String versionedAccessToken = prependVersionAndEncodeAccessToken(tokenVersion, accessToken); if (tokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - final String versionedRefreshToken = plainRefreshToken != null - ? prependVersionAndEncode(tokenVersion, plainRefreshToken) - : null; - listener.onResponse(new Tuple<>(userToken, versionedRefreshToken)); + final String versionedRefreshToken = refreshToken != null + ? prependVersionAndEncodeRefreshToken(tokenVersion, refreshToken) + : null; + listener.onResponse(new Tuple<>(versionedAccessToken, versionedRefreshToken)); } else { - // prior versions are not version-prepended, as nodes on those versions don't expect it. + // prior versions of the refresh token are not version-prepended, as nodes on those + // versions don't expect it. // Such nodes might exist in a mixed cluster during a rolling upgrade. - listener.onResponse(new Tuple<>(userToken, plainRefreshToken)); + listener.onResponse(new Tuple<>(versionedAccessToken, refreshToken)); } } else { listener.onFailure(traceLog("create token", @@ -301,6 +358,15 @@ private void createOAuth2Tokens(String userTokenId, Version tokenVersion, Securi } } + /** + * Hashes an access or refresh token String so that it can safely be persisted in the index. We don't salt + * the values as these are v4 UUIDs that have enough entropy by themselves. 
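Conceptually, the hashing described above is an unsalted SHA-256 digest of the UTF-8 bytes of the UUID seed, Base64-encoded. A minimal standalone sketch, assuming Hasher.SHA256 reduces to exactly that (the authoritative output format is whatever the Hasher class produces):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

// Roughly what hashTokenString computes for a v4 UUID token seed: no salt, because
// the 128 bits of randomness in the seed already resist brute force.
static String sha256TokenDigest(String tokenSeed) throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    byte[] digest = md.digest(tokenSeed.getBytes(StandardCharsets.UTF_8));
    return Base64.getEncoder().encodeToString(digest); // 44 Base64 chars for a 32-byte digest
}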
+ */ + // public for testing + public static String hashTokenString(String accessTokenString) { + return new String(Hasher.SHA256.hash(new SecureString(accessTokenString.toCharArray()))); + } + /** * Looks in the context to see if the request provided a header with a user token and if so the * token is validated, which might include authenticated decryption and verification that the token @@ -406,13 +472,24 @@ void decodeToken(String token, ActionListener listener) { final Version version = Version.readVersion(in); in.setVersion(version); if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { - // The token was created in a > VERSION_ACCESS_TOKENS_UUIDS cluster so it contains the tokenId as a String - String usedTokenId = in.readString(); - getUserTokenFromId(usedTokenId, version, listener); + // The token was created in a > VERSION_ACCESS_TOKENS_UUIDS cluster + if (in.available() < MINIMUM_BYTES) { + logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BYTES); + listener.onResponse(null); + return; + } + final String accessToken = in.readString(); + // TODO Remove this conditional after backporting to 7.x + if (version.onOrAfter(VERSION_HASHED_TOKENS)) { + final String userTokenId = hashTokenString(accessToken); + getUserTokenFromId(userTokenId, version, listener); + } else { + getUserTokenFromId(accessToken, version, listener); + } } else { // The token was created in a < VERSION_ACCESS_TOKENS_UUIDS cluster so we need to decrypt it to get the tokenId - if (in.available() < MINIMUM_BASE64_BYTES) { - logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BASE64_BYTES); + if (in.available() < LEGACY_MINIMUM_BYTES) { + logger.debug("invalid token, smaller than [{}] bytes", LEGACY_MINIMUM_BYTES); listener.onResponse(null); return; } @@ -709,8 +786,12 @@ private void indexInvalidation(Collection tokenIds, SecurityIndexManager /** * Called by the transport action in order to start the process of refreshing a token. + * + * @param refreshToken The refresh token as provided by the client + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client */ - public void refreshToken(String refreshToken, ActionListener> listener) { + public void refreshToken(String refreshToken, ActionListener> listener) { ensureEnabled(); final Instant refreshRequested = clock.instant(); final Iterator backoff = DEFAULT_BACKOFF.iterator(); @@ -718,36 +799,49 @@ public void refreshToken(String refreshToken, ActionListener { final Authentication clientAuth = Authentication.readFromContext(client.threadPool().getThreadContext()); - innerRefresh(tokenDocHit.getId(), tokenDocHit.getSourceAsMap(), tokenDocHit.getSeqNo(), tokenDocHit.getPrimaryTerm(), - clientAuth, backoff, refreshRequested, listener); + innerRefresh(refreshToken, tokenDocHit.getId(), tokenDocHit.getSourceAsMap(), tokenDocHit.getSeqNo(), + tokenDocHit.getPrimaryTerm(), + clientAuth, backoff, refreshRequested, listener); }, listener::onFailure)); } /** - * Inferes the format and version of the passed in {@code refreshToken}. Delegates the actual search of the token document to + * Infers the format and version of the passed in {@code refreshToken}. Delegates the actual search of the token document to * {@code #findTokenFromRefreshToken(String, SecurityIndexManager, Iterator, ActionListener)} . 
*/ private void findTokenFromRefreshToken(String refreshToken, Iterator backoff, ActionListener listener) { - if (refreshToken.length() == TOKEN_ID_LENGTH) { + if (refreshToken.length() == TOKEN_LENGTH) { // first check if token has the old format before the new version-prepended one logger.debug("Assuming an unversioned refresh token [{}], generated for node versions" - + " prior to the introduction of the version-header format.", refreshToken); + + " prior to the introduction of the version-header format.", refreshToken); findTokenFromRefreshToken(refreshToken, securityMainIndex, backoff, listener); } else { - try { - final Tuple versionAndRefreshTokenTuple = unpackVersionAndPayload(refreshToken); - final Version refreshTokenVersion = versionAndRefreshTokenTuple.v1(); - final String unencodedRefreshToken = versionAndRefreshTokenTuple.v2(); - if (false == refreshTokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED) - || unencodedRefreshToken.length() != TOKEN_ID_LENGTH) { - logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, refreshTokenVersion); + if (refreshToken.length() == HASHED_TOKEN_LENGTH) { + logger.debug("Assuming a hashed refresh token [{}] retrieved from the tokens index", refreshToken); + findTokenFromRefreshToken(refreshToken, securityTokensIndex, backoff, listener); + } else { + logger.debug("Assuming a refresh token [{}] provided from a client", refreshToken); + try { + final Tuple versionAndRefreshTokenTuple = unpackVersionAndPayload(refreshToken); + final Version refreshTokenVersion = versionAndRefreshTokenTuple.v1(); + final String unencodedRefreshToken = versionAndRefreshTokenTuple.v2(); + if (refreshTokenVersion.before(VERSION_TOKENS_INDEX_INTRODUCED) || unencodedRefreshToken.length() != TOKEN_LENGTH) { + logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, + refreshTokenVersion); + listener.onFailure(malformedTokenException()); + } else { + // TODO Remove this conditional after backporting to 7.x + if (refreshTokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + final String hashedRefreshToken = hashTokenString(unencodedRefreshToken); + findTokenFromRefreshToken(hashedRefreshToken, securityTokensIndex, backoff, listener); + } else { + findTokenFromRefreshToken(unencodedRefreshToken, securityTokensIndex, backoff, listener); + } + } + } catch (IOException e) { + logger.debug(() -> new ParameterizedMessage("Could not decode refresh token [{}].", refreshToken), e); listener.onFailure(malformedTokenException()); - } else { - findTokenFromRefreshToken(unencodedRefreshToken, securityTokensIndex, backoff, listener); } - } catch (IOException e) { - logger.debug("Could not decode refresh token [" + refreshToken + "].", e); - listener.onFailure(malformedTokenException()); } } } @@ -763,7 +857,7 @@ private void findTokenFromRefreshToken(String refreshToken, SecurityIndexManager final Consumer maybeRetryOnFailure = ex -> { if (backoff.hasNext()) { final TimeValue backofTimeValue = backoff.next(); - logger.debug("retrying after [" + backofTimeValue + "] back off"); + logger.debug("retrying after [{}] back off", backofTimeValue); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() .preserveContext(() -> findTokenFromRefreshToken(refreshToken, tokensIndexManager, backoff, listener)); client.threadPool().schedule(retryWithContextRunnable, backofTimeValue, GENERIC); @@ -821,13 +915,14 @@ private void findTokenFromRefreshToken(String refreshToken, SecurityIndexManager * 
supersedes this one. The new document that contains the new access token and refresh token is created and finally the new access * token and refresh token are returned to the listener. */ - private void innerRefresh(String tokenDocId, Map source, long seqNo, long primaryTerm, Authentication clientAuth, - Iterator backoff, Instant refreshRequested, ActionListener> listener) { + private void innerRefresh(String refreshToken, String tokenDocId, Map source, long seqNo, long primaryTerm, + Authentication clientAuth, Iterator backoff, Instant refreshRequested, + ActionListener> listener) { logger.debug("Attempting to refresh token stored in token document [{}]", tokenDocId); final Consumer onFailure = ex -> listener.onFailure(traceLog("refresh token", tokenDocId, ex)); final Tuple> checkRefreshResult; try { - checkRefreshResult = checkTokenDocumentForRefresh(clock.instant(), clientAuth, source); + checkRefreshResult = checkTokenDocumentForRefresh(refreshRequested, clientAuth, source); } catch (DateTimeException | IllegalStateException e) { onFailure.accept(new ElasticsearchSecurityException("invalid token document", e)); return; @@ -838,23 +933,29 @@ private void innerRefresh(String tokenDocId, Map source, long se } final RefreshTokenStatus refreshTokenStatus = checkRefreshResult.v1(); if (refreshTokenStatus.isRefreshed()) { - logger.debug("Token document [{}] was recently refreshed, when a new token document [{}] was generated. Reusing that result.", - tokenDocId, refreshTokenStatus.getSupersededBy()); - getSupersedingTokenDocAsyncWithRetry(refreshTokenStatus, backoff, listener); + logger.debug("Token document [{}] was recently refreshed, when a new token document was generated. Reusing that result.", + tokenDocId); + decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, listener); } else { - final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newAccessTokenString = UUIDs.randomBase64UUID(); + final String newRefreshTokenString = UUIDs.randomBase64UUID(); final Version newTokenVersion = getTokenVersionCompatibility(); final Map updateMap = new HashMap<>(); updateMap.put("refreshed", true); - updateMap.put("refresh_time", clock.instant().toEpochMilli()); - if (newTokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - // the superseding token document reference is formated as "|"; - // for now, only the ".security-tokens|" is a valid reference format - updateMap.put("superseded_by", securityTokensIndex.aliasName() + "|" + getTokenDocumentId(newUserTokenId)); - } else { - // preservers the format of the reference (without the alias prefix) - // so that old nodes in a mixed cluster can still understand it - updateMap.put("superseded_by", getTokenDocumentId(newUserTokenId)); + if (newTokenVersion.onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + updateMap.put("refresh_time", clock.instant().toEpochMilli()); + try { + final byte[] iv = getRandomBytes(IV_BYTES); + final byte[] salt = getRandomBytes(SALT_BYTES); + String encryptedAccessAndRefreshToken = encryptSupersedingTokens(newAccessTokenString, + newRefreshTokenString, refreshToken, iv, salt); + updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); + updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); + updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); + } catch (GeneralSecurityException e) { + logger.warn("could not encrypt access token and refresh token string", e); + 
onFailure.accept(invalidGrantException("could not refresh the requested token")); + } } assert seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned sequence number"; assert primaryTerm != SequenceNumbers.UNASSIGNED_PRIMARY_TERM : "expected an assigned primary term"; @@ -875,14 +976,15 @@ private void innerRefresh(String tokenDocId, Map source, long se updateResponse.getGetResult().sourceAsMap())); final Tuple parsedTokens = parseTokensFromDocument(source, null); final UserToken toRefreshUserToken = parsedTokens.v1(); - createOAuth2Tokens(newUserTokenId, newTokenVersion, getTokensIndexForVersion(newTokenVersion), - toRefreshUserToken.getAuthentication(), clientAuth, toRefreshUserToken.getMetadata(), true, listener); + createOAuth2Tokens(newAccessTokenString, newRefreshTokenString, newTokenVersion, + getTokensIndexForVersion(newTokenVersion), toRefreshUserToken.getAuthentication(), clientAuth, + toRefreshUserToken.getMetadata(), listener); } else if (backoff.hasNext()) { logger.info("failed to update the original token document [{}], the update result was [{}]. Retrying", tokenDocId, updateResponse.getResult()); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> innerRefresh(tokenDocId, source, seqNo, primaryTerm, clientAuth, backoff, - refreshRequested, listener)); + .preserveContext(() -> innerRefresh(refreshToken, tokenDocId, source, seqNo, primaryTerm, clientAuth, + backoff, refreshRequested, listener)); client.threadPool().schedule(retryWithContextRunnable, backoff.next(), GENERIC); } else { logger.info("failed to update the original token document [{}] after all retries, the update result was [{}]. ", @@ -898,8 +1000,8 @@ private void innerRefresh(String tokenDocId, Map source, long se @Override public void onResponse(GetResponse response) { if (response.isExists()) { - innerRefresh(tokenDocId, response.getSource(), response.getSeqNo(), response.getPrimaryTerm(), - clientAuth, backoff, refreshRequested, listener); + innerRefresh(refreshToken, tokenDocId, response.getSource(), response.getSeqNo(), + response.getPrimaryTerm(), clientAuth, backoff, refreshRequested, listener); } else { logger.warn("could not find token document [{}] for refresh", tokenDocId); onFailure.accept(invalidGrantException("could not refresh the requested token")); @@ -927,8 +1029,8 @@ public void onFailure(Exception e) { if (backoff.hasNext()) { logger.debug("failed to update the original token document [{}], retrying", tokenDocId); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> innerRefresh(tokenDocId, source, seqNo, primaryTerm, clientAuth, backoff, - refreshRequested, listener)); + .preserveContext(() -> innerRefresh(refreshToken, tokenDocId, source, seqNo, primaryTerm, + clientAuth, backoff, refreshRequested, listener)); client.threadPool().schedule(retryWithContextRunnable, backoff.next(), GENERIC); } else { logger.warn("failed to update the original token document [{}], after all retries", tokenDocId); @@ -941,72 +1043,47 @@ public void onFailure(Exception e) { } } - private void getSupersedingTokenDocAsyncWithRetry(RefreshTokenStatus refreshTokenStatus, Iterator backoff, - ActionListener> listener) { - final Consumer onFailure = ex -> listener - .onFailure(traceLog("get superseding token", refreshTokenStatus.getSupersededBy(), ex)); - getSupersedingTokenDocAsync(refreshTokenStatus, new ActionListener() { - private final Consumer maybeRetryOnFailure = ex -> { - if 
(backoff.hasNext()) { - final TimeValue backofTimeValue = backoff.next(); - logger.debug("retrying after [" + backofTimeValue + "] back off"); - final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> getSupersedingTokenDocAsync(refreshTokenStatus, this)); - client.threadPool().schedule(retryWithContextRunnable, backofTimeValue, GENERIC); - } else { - logger.warn("back off retries exhausted"); - onFailure.accept(ex); - } - }; - - @Override - public void onResponse(GetResponse response) { - if (response.isExists()) { - logger.debug("found superseding token document [{}] in index [{}] by following the [{}] reference", response.getId(), - response.getIndex(), refreshTokenStatus.getSupersededBy()); - final Tuple parsedTokens; - try { - parsedTokens = parseTokensFromDocument(response.getSource(), null); - } catch (IllegalStateException | DateTimeException e) { - logger.error("unable to decode existing user token", e); - listener.onFailure(new ElasticsearchSecurityException("could not refresh the requested token", e)); - return; - } - listener.onResponse(parsedTokens); - } else { - // We retry this since the creation of the superseding token document might already be in flight but not - // yet completed, triggered by a refresh request that came a few milliseconds ago - logger.info("could not find superseding token document from [{}] reference, retrying", - refreshTokenStatus.getSupersededBy()); - maybeRetryOnFailure.accept(invalidGrantException("could not refresh the requested token")); - } - } - - @Override - public void onFailure(Exception e) { - if (isShardNotAvailableException(e)) { - logger.info("could not find superseding token document from reference [{}], retrying", - refreshTokenStatus.getSupersededBy()); - maybeRetryOnFailure.accept(invalidGrantException("could not refresh the requested token")); - } else { - logger.warn("could not find superseding token document from reference [{}]", refreshTokenStatus.getSupersededBy()); - onFailure.accept(invalidGrantException("could not refresh the requested token")); - } + /** + * Decrypts the values of the superseding access token and the refresh token, using a key derived from the superseded refresh token. It + * encodes the version and serializes the tokens before calling the listener, in the same manner as {@link #createOAuth2Tokens } does. 
+     *
+     * @param refreshToken The refresh token that the user sent in the request, used to derive the decryption key
+     * @param refreshTokenStatus The {@link RefreshTokenStatus} containing information about the superseding tokens as retrieved from the
+     *                           index
+     * @param listener The listener to call upon completion with a {@link Tuple} containing the
+     *                 serialized access token and serialized refresh token as these will be returned to the client
+     */
+    void decryptAndReturnSupersedingTokens(String refreshToken, RefreshTokenStatus refreshTokenStatus,
+                                           ActionListener<Tuple<String, String>> listener) {
+        final byte[] iv = Base64.getDecoder().decode(refreshTokenStatus.getIv());
+        final byte[] salt = Base64.getDecoder().decode(refreshTokenStatus.getSalt());
+        final byte[] encryptedSupersedingTokens = Base64.getDecoder().decode(refreshTokenStatus.getSupersedingTokens());
+        try {
+            Cipher cipher = getDecryptionCipher(iv, refreshToken, salt);
+            final String supersedingTokens = new String(cipher.doFinal(encryptedSupersedingTokens), StandardCharsets.UTF_8);
+            final String[] decryptedTokens = supersedingTokens.split("\\|");
+            if (decryptedTokens.length != 2) {
+                logger.warn("Decrypted tokens string is not correctly formatted");
+                listener.onFailure(invalidGrantException("could not refresh the requested token"));
+            } else {
+                listener.onResponse(new Tuple<>(prependVersionAndEncodeAccessToken(refreshTokenStatus.getVersion(), decryptedTokens[0]),
+                    prependVersionAndEncodeRefreshToken(refreshTokenStatus.getVersion(), decryptedTokens[1])));
+            }
-        });
+        } catch (GeneralSecurityException | IOException e) {
+            logger.warn("Could not get stored superseding token values", e);
+            listener.onFailure(invalidGrantException("could not refresh the requested token"));
+        }
     }
 
-    private void getSupersedingTokenDocAsync(RefreshTokenStatus refreshTokenStatus, ActionListener<GetResponse> listener) {
-        final String supersedingDocReference = refreshTokenStatus.getSupersededBy();
-        if (supersedingDocReference.startsWith(securityTokensIndex.aliasName() + "|")) {
-            // superseding token doc is stored on the new tokens index, irrespective of where the superseded token doc resides
-            final String supersedingDocId = supersedingDocReference.substring(securityTokensIndex.aliasName().length() + 1);
-            getTokenDocAsync(supersedingDocId, securityTokensIndex, listener);
-        } else {
-            assert false == supersedingDocReference
-                .contains("|") : "The superseding doc reference appears to contain an alias name but should not";
-            getTokenDocAsync(supersedingDocReference, securityMainIndex, listener);
-        }
+    /*
+     * Encrypts the values of the superseding access token and the refresh token, using a key derived from the superseded refresh token.
+ * The tokens are concatenated to a string separated with `|` before encryption so that we only perform one encryption operation + * and that we only need to store one field + */ + String encryptSupersedingTokens(String supersedingAccessToken, String supersedingRefreshToken, + String refreshToken, byte[] iv, byte[] salt) throws GeneralSecurityException { + Cipher cipher = getEncryptionCipher(iv, refreshToken, salt); + final String supersedingTokens = supersedingAccessToken + "|" + supersedingRefreshToken; + return Base64.getEncoder().encodeToString(cipher.doFinal(supersedingTokens.getBytes(StandardCharsets.UTF_8))); } private void getTokenDocAsync(String tokenDocId, SecurityIndexManager tokensIndex, ActionListener listener) { @@ -1016,7 +1093,7 @@ private void getTokenDocAsync(String tokenDocId, SecurityIndexManager tokensInde () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, listener, client::get)); } - private Version getTokenVersionCompatibility() { + Version getTokenVersionCompatibility() { // newly minted tokens are compatible with the min node version in the cluster return clusterService.state().nodes().getMinNodeVersion(); } @@ -1029,13 +1106,13 @@ public static Boolean isTokenServiceEnabled(Settings settings) { * A refresh token has a fixed maximum lifetime of {@code ExpiredTokenRemover#MAXIMUM_TOKEN_LIFETIME_HOURS} hours. This checks if the * token document represents a valid token wrt this time interval. */ - private static Optional checkTokenDocumentExpired(Instant now, Map source) { - final Long creationEpochMilli = (Long) source.get("creation_time"); + private static Optional checkTokenDocumentExpired(Instant refreshRequested, Map src) { + final Long creationEpochMilli = (Long) src.get("creation_time"); if (creationEpochMilli == null) { throw new IllegalStateException("token document is missing creation time value"); } else { final Instant creationTime = Instant.ofEpochMilli(creationEpochMilli); - if (now.isAfter(creationTime.plus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { + if (refreshRequested.isAfter(creationTime.plus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { return Optional.of(invalidGrantException("token document has expired")); } else { return Optional.empty(); @@ -1048,17 +1125,17 @@ private static Optional checkTokenDocumentExpire * parsed {@code RefreshTokenStatus} together with an {@code Optional} validation exception that encapsulates the various logic about * when and by who a token can be refreshed. 
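To make the encrypt/decrypt pair above (encryptSupersedingTokens and getDecryptionCipher) concrete, here is a self-contained, hypothetical round trip. The cipher name, IV and salt sizes, and iteration count are assumptions for illustration; the real values come from the ENCRYPTION_CIPHER, IV_BYTES, SALT_BYTES and TOKENS_ENCRYPTION_KEY_ITERATIONS constants in TokenService:

import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.util.Base64;

public class SupersedingTokensRoundTrip {

    // Derive a 128-bit AES key from the superseded refresh token, mirroring
    // computeSecretKey(char[], byte[], int) in this patch.
    static SecretKey deriveKey(String refreshToken, byte[] salt, int iterations) throws GeneralSecurityException {
        PBEKeySpec keySpec = new PBEKeySpec(refreshToken.toCharArray(), salt, iterations, 128);
        SecretKey tmp = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512").generateSecret(keySpec);
        return new SecretKeySpec(tmp.getEncoded(), "AES");
    }

    public static void main(String[] args) throws GeneralSecurityException {
        SecureRandom random = new SecureRandom();
        byte[] iv = new byte[12];   // assumed IV_BYTES
        byte[] salt = new byte[32]; // assumed SALT_BYTES
        random.nextBytes(iv);
        random.nextBytes(salt);

        String refreshToken = "superseded-refresh-token-uuid";         // the key material
        String payload = "new-access-uuid" + "|" + "new-refresh-uuid"; // the concatenated pair

        // Encrypt, binding the salt as additional authenticated data, as the patch does.
        SecretKey key = deriveKey(refreshToken, salt, 1024);           // assumed iteration count
        Cipher enc = Cipher.getInstance("AES/GCM/NoPadding");
        enc.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
        enc.updateAAD(salt);
        String stored = Base64.getEncoder().encodeToString(enc.doFinal(payload.getBytes(StandardCharsets.UTF_8)));

        // Decrypt with the same derived key, then split on '|' and expect exactly two values.
        Cipher dec = Cipher.getInstance("AES/GCM/NoPadding");
        dec.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv));
        dec.updateAAD(salt);
        String[] tokens = new String(dec.doFinal(Base64.getDecoder().decode(stored)), StandardCharsets.UTF_8).split("\\|");
        if (tokens.length != 2) {
            throw new IllegalStateException("Decrypted tokens string is not correctly formatted");
        }
    }
}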
*/ - private static Tuple> checkTokenDocumentForRefresh(Instant now, - Authentication clientAuth, Map source) throws IllegalStateException, DateTimeException { + private static Tuple> checkTokenDocumentForRefresh( + Instant refreshRequested, Authentication clientAuth, Map source) throws IllegalStateException, DateTimeException { final RefreshTokenStatus refreshTokenStatus = RefreshTokenStatus.fromSourceMap(getRefreshTokenSourceMap(source)); final UserToken userToken = UserToken.fromSourceMap(getUserTokenSourceMap(source)); refreshTokenStatus.setVersion(userToken.getVersion()); - final ElasticsearchSecurityException validationException = checkTokenDocumentExpired(now, source).orElseGet(() -> { + final ElasticsearchSecurityException validationException = checkTokenDocumentExpired(refreshRequested, source).orElseGet(() -> { if (refreshTokenStatus.isInvalidated()) { return invalidGrantException("token has been invalidated"); } else { return checkClientCanRefresh(refreshTokenStatus, clientAuth) - .orElse(checkMultipleRefreshes(now, refreshTokenStatus).orElse(null)); + .orElse(checkMultipleRefreshes(refreshRequested, refreshTokenStatus).orElse(null)); } }); return new Tuple<>(refreshTokenStatus, Optional.ofNullable(validationException)); @@ -1111,13 +1188,14 @@ private static Map getUserTokenSourceMap(Map sou * @return An {@code Optional} containing the exception in case this refresh token cannot be reused, or an empty Optional if * refreshing is allowed. */ - private static Optional checkMultipleRefreshes(Instant now, RefreshTokenStatus refreshTokenStatus) { + private static Optional checkMultipleRefreshes(Instant refreshRequested, + RefreshTokenStatus refreshTokenStatus) { if (refreshTokenStatus.isRefreshed()) { if (refreshTokenStatus.getVersion().onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { - if (now.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { + if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); } - if (now.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { + if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { return Optional .of(invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great")); } @@ -1269,7 +1347,7 @@ private void sourceIndicesWithTokensAndRun(ActionListener> listener private BytesReference createTokenDocument(UserToken userToken, @Nullable String refreshToken, @Nullable Authentication originatingClientAuth) { assert refreshToken == null || originatingClientAuth != null : "non-null refresh token " + refreshToken - + " requires non-null client authn " + originatingClientAuth; + + " requires non-null client authn " + originatingClientAuth; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); builder.field("doc_type", TOKEN_DOC_TYPE); @@ -1332,21 +1410,14 @@ private Tuple filterAndParseHit(SearchHit hit, @Nullable Pred */ private Tuple parseTokensFromDocument(Map source, @Nullable Predicate> filter) throws IllegalStateException, DateTimeException { - final String plainRefreshToken = (String) ((Map) source.get("refresh_token")).get("token"); + final String hashedRefreshToken = (String) ((Map) source.get("refresh_token")).get("token"); final Map userTokenSource = (Map) ((Map) 
source.get("access_token")).get("user_token"); if (null != filter && filter.test(userTokenSource) == false) { return null; } final UserToken userToken = UserToken.fromSourceMap(userTokenSource); - if (userToken.getVersion().onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - final String versionedRefreshToken = plainRefreshToken != null ? - prependVersionAndEncode(userToken.getVersion(), plainRefreshToken) : null; - return new Tuple<>(userToken, versionedRefreshToken); - } else { - // do not prepend version to refresh token as the audience node version cannot deal with it - return new Tuple<>(userToken, plainRefreshToken); - } + return new Tuple<>(userToken, hashedRefreshToken); } private static String getTokenDocumentId(UserToken userToken) { @@ -1450,7 +1521,7 @@ public TimeValue getExpirationDelay() { return expirationDelay; } - private Instant getExpirationTime() { + Instant getExpirationTime() { return clock.instant().plusSeconds(expirationDelay.getSeconds()); } @@ -1478,38 +1549,34 @@ private String getFromHeader(ThreadContext threadContext) { return null; } - /** - * Serializes a token to a String containing the minimum compatible node version for decoding it back and either an encrypted - * representation of the token id for versions earlier to {@code #VERSION_ACCESS_TOKENS_UUIDS} or the token itself for versions after - * {@code #VERSION_ACCESS_TOKENS_UUIDS} - */ - public String getAccessTokenAsString(UserToken userToken) throws IOException, GeneralSecurityException { - if (userToken.getVersion().onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { + String prependVersionAndEncodeAccessToken(Version version, String accessToken) throws IOException, GeneralSecurityException { + if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(userToken.getVersion()); - Version.writeVersion(userToken.getVersion(), out); - out.writeString(userToken.getId()); + out.setVersion(version); + Version.writeVersion(version, out); + out.writeString(accessToken); return new String(os.toByteArray(), StandardCharsets.UTF_8); } } else { // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly - try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); + try (ByteArrayOutputStream os = new ByteArrayOutputStream(LEGACY_MINIMUM_BASE64_BYTES); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(userToken.getVersion()); + out.setVersion(version); KeyAndCache keyAndCache = keyCache.activeKeyCache; - Version.writeVersion(userToken.getVersion(), out); + Version.writeVersion(version, out); out.writeByteArray(keyAndCache.getSalt().bytes); out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = getNewInitializationVector(); + final byte[] initializationVector = getRandomBytes(IV_BYTES); out.writeByteArray(initializationVector); try (CipherOutputStream encryptedOutput = - new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, userToken.getVersion())); + new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, version)); StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { - encryptedStreamOutput.setVersion(userToken.getVersion()); - 
encryptedStreamOutput.writeString(userToken.getId()); + encryptedStreamOutput.setVersion(version); + encryptedStreamOutput.writeString(accessToken); + // StreamOutput needs to be closed explicitly because it wraps CipherOutputStream encryptedStreamOutput.close(); return new String(os.toByteArray(), StandardCharsets.UTF_8); } @@ -1517,7 +1584,7 @@ public String getAccessTokenAsString(UserToken userToken) throws IOException, Ge } } - private static String prependVersionAndEncode(Version version, String payload) { + static String prependVersionAndEncodeRefreshToken(Version version, String payload) { try (ByteArrayOutputStream os = new ByteArrayOutputStream(); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { @@ -1563,6 +1630,17 @@ Cipher getEncryptionCipher(byte[] iv, KeyAndCache keyAndCache, Version version) return cipher; } + /** + * Initialize the encryption cipher using the provided password to derive the encryption key. + */ + Cipher getEncryptionCipher(byte[] iv, String password, byte[] salt) throws GeneralSecurityException { + SecretKey key = computeSecretKey(password.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS); + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(salt); + return cipher; + } + private void getKeyAsync(BytesKey decodedSalt, KeyAndCache keyAndCache, ActionListener listener) { final SecretKey decodeKey = keyAndCache.getKey(decodedSalt); if (decodeKey != null) { @@ -1595,21 +1673,31 @@ private Cipher getDecryptionCipher(byte[] iv, SecretKey key, Version version, By return cipher; } - // Package private for testing - byte[] getNewInitializationVector() { - final byte[] initializationVector = new byte[IV_BYTES]; - secureRandom.nextBytes(initializationVector); - return initializationVector; + /** + * Initialize the decryption cipher using the provided password to derive the decryption key. + */ + private Cipher getDecryptionCipher(byte[] iv, String password, byte[] salt) throws GeneralSecurityException { + SecretKey key = computeSecretKey(password.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS); + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(salt); + return cipher; + } + + byte[] getRandomBytes(int length) { + final byte[] bytes = new byte[length]; + secureRandom.nextBytes(bytes); + return bytes; } /** * Generates a secret key based off of the provided password and salt. - * This method is computationally expensive. + * This method can be computationally expensive. 
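A minimal sketch of the serialization convention used by prependVersionAndEncodeRefreshToken above (and by the access-token path for current versions): the minimum compatible node version is written before the payload, so a receiving node knows how to parse the UUID and where to look it up. This assumes the same StreamOutput wire helpers used throughout this patch:

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Version id first, then the UUID payload; closing the StreamOutput flushes the
// Base64 wrapper so its padding is written before the bytes are read back out.
static String encodeVersionedToken(Version version, String payload) throws IOException {
    final ByteArrayOutputStream os = new ByteArrayOutputStream();
    try (OutputStream base64 = Base64.getEncoder().wrap(os);
         StreamOutput out = new OutputStreamStreamOutput(base64)) {
        out.setVersion(version);
        Version.writeVersion(version, out);
        out.writeString(payload);
    }
    return new String(os.toByteArray(), StandardCharsets.UTF_8);
}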
*/ - static SecretKey computeSecretKey(char[] rawPassword, byte[] salt) + static SecretKey computeSecretKey(char[] rawPassword, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeySpecException { SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(KDF_ALGORITHM); - PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, ITERATIONS, 128); + PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, iterations, 128); SecretKey tmp = secretKeyFactory.generateSecret(keySpec); return new SecretKeySpec(tmp.getEncoded(), "AES"); } @@ -2003,7 +2091,7 @@ private KeyAndCache(KeyAndTimestamp keyAndTimestamp, BytesKey salt) { .setMaximumWeight(500L) .build(); try { - SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes); + SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes, TOKEN_SERVICE_KEY_ITERATIONS); keyCache.put(salt, secretKey); } catch (Exception e) { throw new IllegalStateException(e); @@ -2019,7 +2107,7 @@ private SecretKey getKey(BytesKey salt) { public SecretKey getOrComputeKey(BytesKey decodedSalt) throws ExecutionException { return keyCache.computeIfAbsent(decodedSalt, (salt) -> { try (SecureString closeableChars = keyAndTimestamp.getKey().clone()) { - return computeSecretKey(closeableChars.getChars(), salt.bytes); + return computeSecretKey(closeableChars.getChars(), salt.bytes, TOKEN_SERVICE_KEY_ITERATIONS); } }); } @@ -2074,24 +2162,32 @@ KeyAndCache get(BytesKey passphraseHash) { /** * Contains metadata associated with the refresh token that is used for validity checks, but does not contain the proper token string. */ - private static final class RefreshTokenStatus { + static final class RefreshTokenStatus { private final boolean invalidated; private final String associatedUser; private final String associatedRealm; private final boolean refreshed; @Nullable private final Instant refreshInstant; - @Nullable private final String supersededBy; + @Nullable + private final String supersedingTokens; + @Nullable + private final String iv; + @Nullable + private final String salt; private Version version; - private RefreshTokenStatus(boolean invalidated, String associatedUser, String associatedRealm, boolean refreshed, - Instant refreshInstant, String supersededBy) { + // pkg-private for testing + RefreshTokenStatus(boolean invalidated, String associatedUser, String associatedRealm, boolean refreshed, Instant refreshInstant, + String supersedingTokens, String iv, String salt) { this.invalidated = invalidated; this.associatedUser = associatedUser; this.associatedRealm = associatedRealm; this.refreshed = refreshed; this.refreshInstant = refreshInstant; - this.supersededBy = supersededBy; + this.supersedingTokens = supersedingTokens; + this.iv = iv; + this.salt = salt; } boolean isInvalidated() { @@ -2114,8 +2210,19 @@ boolean isRefreshed() { return refreshInstant; } - @Nullable String getSupersededBy() { - return supersededBy; + @Nullable + String getSupersedingTokens() { + return supersedingTokens; + } + + @Nullable + String getIv() { + return iv; + } + + @Nullable + String getSalt() { + return salt; } Version getVersion() { @@ -2149,8 +2256,11 @@ static RefreshTokenStatus fromSourceMap(Map refreshTokenSource) } final Long refreshEpochMilli = (Long) refreshTokenSource.get("refresh_time"); final Instant refreshInstant = refreshEpochMilli == null ? 
null : Instant.ofEpochMilli(refreshEpochMilli); - final String supersededBy = (String) refreshTokenSource.get("superseded_by"); - return new RefreshTokenStatus(invalidated, associatedUser, associatedRealm, refreshed, refreshInstant, supersededBy); + final String supersedingTokens = (String) refreshTokenSource.get("superseding.encrypted_tokens"); + final String iv = (String) refreshTokenSource.get("superseding.encryption_iv"); + final String salt = (String) refreshTokenSource.get("superseding.encryption_salt"); + return new RefreshTokenStatus(invalidated, associatedUser, associatedRealm, refreshed, refreshInstant, supersedingTokens, + iv, salt); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java index 2bcf0849084bc..f46aa42a24450 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java @@ -50,7 +50,7 @@ public final class UserToken implements Writeable, ToXContentObject { /** * Create a new token with an autogenerated id */ - UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { + private UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { this(UUIDs.randomBase64UUID(), version, authentication, expirationTime, metadata); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index 69cedf6389f7f..0ab3c96167c2c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -47,7 +48,6 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectTestCase; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; @@ -195,20 +195,21 @@ public void testLogoutInvalidatesTokens() throws Exception { final JWT signedIdToken = generateIdToken(subject, randomAlphaOfLength(8), randomAlphaOfLength(8)); final User user = new User("oidc-user", new String[]{"superuser"}, null, null, null, true); final Authentication.RealmRef realmRef = new Authentication.RealmRef(oidcRealm.name(), OpenIdConnectRealmSettings.TYPE, "node01"); - final Authentication authentication = new Authentication(user, realmRef, null); - final Map tokenMetadata = new HashMap<>(); tokenMetadata.put("id_token_hint", signedIdToken.serialize()); 
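// For context, a hedged sketch of the call pattern these tests now use: both token
// seeds are client-generated v4 UUIDs handed to the service, and the listener yields
// the already serialized token strings instead of a UserToken object, e.g.:
//
//     final String userTokenId = UUIDs.randomBase64UUID();
//     final String refreshToken = UUIDs.randomBase64UUID();
//     final PlainActionFuture<Tuple<String, String>> future = new PlainActionFuture<>();
//     tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication,
//         authentication, tokenMetadata, future);
//     final String accessToken = future.actionGet().v1();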
tokenMetadata.put("oidc_realm", REALM_NAME); + final Authentication authentication = new Authentication(user, realmRef, null, null, Authentication.AuthenticationType.REALM, + tokenMetadata); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, tokenMetadata, true, future); - final UserToken userToken = future.actionGet().v1(); - mockGetTokenFromId(userToken, false, client); - final String tokenString = tokenService.getAccessTokenAsString(userToken); + final PlainActionFuture> future = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, tokenMetadata, future); + final String accessToken = future.actionGet().v1(); + mockGetTokenFromId(tokenService, userTokenId, authentication, false, client); final OpenIdConnectLogoutRequest request = new OpenIdConnectLogoutRequest(); - request.setToken(tokenString); + request.setToken(accessToken); final PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, listener); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 3f4ac8942089c..6a9c487bf2013 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -66,7 +67,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.saml.SamlLogoutRequestHandler; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; @@ -252,9 +252,14 @@ public void cleanup() { } public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { + final String userTokenId1 = UUIDs.randomBase64UUID(); + final String refreshToken1 = UUIDs.randomBase64UUID(); + final String userTokenId2 = UUIDs.randomBase64UUID(); + final String refreshToken2 = UUIDs.randomBase64UUID(); storeToken(logoutRequest.getNameId(), randomAlphaOfLength(10)); - final Tuple tokenToInvalidate1 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); - final Tuple tokenToInvalidate2 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); + final Tuple tokenToInvalidate1 = storeToken(userTokenId1, refreshToken1, logoutRequest.getNameId(), + logoutRequest.getSession()); + storeToken(userTokenId2, refreshToken2, logoutRequest.getNameId(), logoutRequest.getSession()); storeToken(new SamlNameId(NameID.PERSISTENT, randomAlphaOfLength(16), null, null, null), 
logoutRequest.getSession()); assertThat(indexRequests.size(), equalTo(4)); @@ -316,27 +321,27 @@ public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { assertThat(filter1.get(1), instanceOf(TermQueryBuilder.class)); assertThat(((TermQueryBuilder) filter1.get(1)).fieldName(), equalTo("refresh_token.token")); assertThat(((TermQueryBuilder) filter1.get(1)).value(), - equalTo(TokenService.unpackVersionAndPayload(tokenToInvalidate1.v2()).v2())); + equalTo(TokenService.hashTokenString(TokenService.unpackVersionAndPayload(tokenToInvalidate1.v2()).v2()))); assertThat(bulkRequests.size(), equalTo(4)); // 4 updates (refresh-token + access-token) // Invalidate refresh token 1 assertThat(bulkRequests.get(0).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(0).requests().get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(bulkRequests.get(0).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId1))); UpdateRequest updateRequest1 = (UpdateRequest) bulkRequests.get(0).requests().get(0); assertThat(updateRequest1.toString().contains("refresh_token"), equalTo(true)); // Invalidate access token 1 assertThat(bulkRequests.get(1).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(1).requests().get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(bulkRequests.get(1).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId1))); UpdateRequest updateRequest2 = (UpdateRequest) bulkRequests.get(1).requests().get(0); assertThat(updateRequest2.toString().contains("access_token"), equalTo(true)); // Invalidate refresh token 2 assertThat(bulkRequests.get(2).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(2).requests().get(0).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(bulkRequests.get(2).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId2))); UpdateRequest updateRequest3 = (UpdateRequest) bulkRequests.get(2).requests().get(0); assertThat(updateRequest3.toString().contains("refresh_token"), equalTo(true)); // Invalidate access token 2 assertThat(bulkRequests.get(3).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(3).requests().get(0).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(bulkRequests.get(3).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId2))); UpdateRequest updateRequest4 = (UpdateRequest) bulkRequests.get(3).requests().get(0); assertThat(updateRequest4.toString().contains("access_token"), equalTo(true)); } @@ -359,13 +364,19 @@ private Function findTokenByRefreshToken(SearchHit[] }; } - private Tuple storeToken(SamlNameId nameId, String session) throws IOException { + private Tuple storeToken(String userTokenId, String refreshToken, SamlNameId nameId, String session) { Authentication authentication = new Authentication(new User("bob"), new RealmRef("native", NativeRealmSettings.TYPE, "node01"), null); final Map metadata = samlRealm.createTokenMetadata(nameId, session); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, metadata, true, future); + final PlainActionFuture> future = new PlainActionFuture<>(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, metadata, future); return future.actionGet(); } + 
private Tuple storeToken(SamlNameId nameId, String session) { + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + return storeToken(userTokenId, refreshToken, nameId, session); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 1652122bf6e80..9b9dc79a29cd4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -55,7 +56,6 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; import org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; @@ -236,19 +236,21 @@ public void testLogoutInvalidatesToken() throws Exception { .map(); final User user = new User("punisher", new String[]{"superuser"}, null, null, userMetaData, true); final Authentication.RealmRef realmRef = new Authentication.RealmRef(samlRealm.name(), SamlRealmSettings.TYPE, "node01"); - final Authentication authentication = new Authentication(user, realmRef, null); - final Map tokenMetaData = samlRealm.createTokenMetadata( - new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + final Authentication authentication = new Authentication(user, realmRef, null, null, Authentication.AuthenticationType.REALM, + tokenMetaData); + - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, tokenMetaData, true, future); - final UserToken userToken = future.actionGet().v1(); - mockGetTokenFromId(userToken, false, client); - final String tokenString = tokenService.getAccessTokenAsString(userToken); + final PlainActionFuture> future = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, tokenMetaData, future); + final String accessToken = future.actionGet().v1(); + mockGetTokenFromId(tokenService, userTokenId, authentication, false, client); final SamlLogoutRequest request = new SamlLogoutRequest(); - request.setToken(tokenString); + request.setToken(accessToken); final PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, listener); final SamlLogoutResponse response = listener.get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index c7994888a2631..67ce5ce2b27af 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -1108,14 +1108,16 @@ public void testAuthenticateWithToken() throws Exception { User user = new User("_username", "r1"); final AtomicBoolean completed = new AtomicBoolean(false); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createOAuth2Tokens(expected, originatingAuth, Collections.emptyMap(), true, tokenFuture); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, expected, originatingAuth, Collections.emptyMap(), tokenFuture); } - String token = tokenService.getAccessTokenAsString(tokenFuture.get().v1()); + String token = tokenFuture.get().v1(); when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); - mockGetTokenFromId(tokenFuture.get().v1(), false, client); + mockGetTokenFromId(tokenService, userTokenId, expected, false, client); when(securityIndex.isAvailable()).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { @@ -1191,13 +1193,15 @@ public void testExpiredToken() throws Exception { when(securityIndex.indexExists()).thenReturn(true); User user = new User("_username", "r1"); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createOAuth2Tokens(expected, originatingAuth, Collections.emptyMap(), true, tokenFuture); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, expected, originatingAuth, Collections.emptyMap(), tokenFuture); } - String token = tokenService.getAccessTokenAsString(tokenFuture.get().v1()); - mockGetTokenFromId(tokenFuture.get().v1(), true, client); + String token = tokenFuture.get().v1(); + mockGetTokenFromId(tokenService, userTokenId, expected, true, client); doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[1]).run(); return null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 7f09444784c6d..42101b1f4ec97 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -62,10 +60,7 @@ import org.junit.Before; import org.junit.BeforeClass; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.time.Clock; import java.time.Instant; @@ -75,7 +70,6 @@ import java.util.HashMap; import java.util.Map; -import javax.crypto.CipherOutputStream; import javax.crypto.SecretKey; import static java.time.Clock.systemUTC; @@ -169,15 +163,16 @@ public static void shutdownThreadpool() throws InterruptedException { public void testAttachAndGetToken() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + tokenService.getAccessTokenAsString(token)); + requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -214,16 +209,21 @@ public void testInvalidAuthorizationHeader() throws Exception { public void testRotateKey() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + 
PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); @@ -240,15 +240,18 @@ assertAuthentication(authentication, serialized.getAuthentication()); } - PlainActionFuture<Tuple<UserToken, String>> newTokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, newTokenFuture); - final UserToken newToken = newTokenFuture.get().v1(); - assertNotNull(newToken); - assertNotEquals(getDeprecatedAccessTokenString(tokenService, newToken), getDeprecatedAccessTokenString(tokenService, token)); + PlainActionFuture<Tuple<String, String>> newTokenFuture = new PlainActionFuture<>(); + final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(newUserTokenId, newRefreshToken, authentication, authentication, Collections.emptyMap(), + newTokenFuture); + final String newAccessToken = newTokenFuture.get().v1(); + assertNotNull(newAccessToken); + assertNotEquals(newAccessToken, accessToken); requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, newToken)); - mockGetTokenFromId(newToken, false); + storeTokenHeader(requestContext, newAccessToken); + mockGetTokenFromId(tokenService, newUserTokenId, authentication, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); @@ -267,6 +270,10 @@ private void rotateKeys(TokenService tokenService) { public void testKeyExchange() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { rotateKeys(tokenService); @@ -274,20 +281,21 @@ TokenService otherTokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture<Tuple<String, String>> tokenFuture = new
PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); UserToken serialized = future.get(); - assertEquals(authentication, serialized.getAuthentication()); + assertAuthentication(serialized.getAuthentication(), authentication); } rotateKeys(tokenService); @@ -298,22 +306,27 @@ PlainActionFuture<UserToken> future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); UserToken serialized = future.get(); - assertEquals(authentication, serialized.getAuthentication()); + assertAuthentication(serialized.getAuthentication(), authentication); } } public void testPruneKeys() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); @@ -336,11 +349,14 @@ assertAuthentication(authentication, serialized.getAuthentication()); } - PlainActionFuture<Tuple<UserToken, String>> newTokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, newTokenFuture); - final UserToken newToken = newTokenFuture.get().v1(); - assertNotNull(newToken); - assertNotEquals(getDeprecatedAccessTokenString(tokenService, newToken), getDeprecatedAccessTokenString(tokenService,
token)); + PlainActionFuture<Tuple<String, String>> newTokenFuture = new PlainActionFuture<>(); + final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(newUserTokenId, newRefreshToken, authentication, authentication, Collections.emptyMap(), + newTokenFuture); + final String newAccessToken = newTokenFuture.get().v1(); + assertNotNull(newAccessToken); + assertNotEquals(newAccessToken, accessToken); metaData = tokenService.pruneKeys(1); tokenService.refreshMetaData(metaData); @@ -353,8 +369,8 @@ public void testPruneKeys() throws Exception { } requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, newToken)); - mockGetTokenFromId(newToken, false); + storeTokenHeader(requestContext, newAccessToken); + mockGetTokenFromId(tokenService, newUserTokenId, authentication, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); @@ -366,16 +382,21 @@ public void testPruneKeys() throws Exception { } public void testPassphraseWorks() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); @@ -395,29 +416,40 @@ public void testPassphraseWorks() throws Exception { public void testGetTokenWhenKeyCacheHasExpired() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); -
tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - UserToken token = tokenFuture.get().v1(); - assertThat(getDeprecatedAccessTokenString(tokenService, token), notNullValue()); + PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + String accessToken = tokenFuture.get().v1(); + assertThat(accessToken, notNullValue()); tokenService.clearActiveKeyCache(); - assertThat(getDeprecatedAccessTokenString(tokenService, token), notNullValue()); + + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + accessToken = tokenFuture.get().v1(); + assertThat(accessToken, notNullValue()); } public void testInvalidatedToken() throws Exception { when(securityMainIndex.indexExists()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, true); + PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, true); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture<UserToken> future = new PlainActionFuture<>(); @@ -436,8 +468,10 @@ private void storeTokenHeader(ThreadContext requestContext, String tokenString) public void testComputeSecretKeyIsConsistent() throws Exception { byte[] saltArr = new byte[32]; random().nextBytes(saltArr); - SecretKey key = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); - SecretKey key2 = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); + SecretKey key = + TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr, TokenService.TOKEN_SERVICE_KEY_ITERATIONS); + SecretKey key2 = + TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr, TokenService.TOKEN_SERVICE_KEY_ITERATIONS); assertArrayEquals(key.getEncoded(), key2.getEncoded()); } @@ -468,14 +502,15 @@ public void testTokenExpiry() throws Exception { ClockMock clock = ClockMock.frozen(); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, clock); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); -
tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken userToken = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + tokenService.getExpirationTime(), Collections.emptyMap()); + mockGetTokenFromId(userToken, false); + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // the clock is still frozen, so the cookie should be valid @@ -519,7 +554,7 @@ public void testTokenServiceDisabled() throws Exception { TokenService tokenService = new TokenService(Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), - Clock.systemUTC(), client, licenseState, securityMainIndex, securityTokensIndex, clusterService); + Clock.systemUTC(), client, licenseState, securityMainIndex, securityTokensIndex, clusterService); IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createOAuth2Tokens(null, null, null, true, null)); assertEquals("security tokens are not enabled", e.getMessage()); @@ -577,14 +612,15 @@ public void testMalformedToken() throws Exception { public void testIndexNotAvailable() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - //mockGetTokenFromId(token, false); + PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); doAnswer(invocationOnMock -> { ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[1]; @@ -620,34 +656,64 @@ public void testIndexNotAvailable() throws Exception { when(tokensIndex.isAvailable()).thenReturn(true); when(tokensIndex.indexExists()).thenReturn(true); - mockGetTokenFromId(token, false); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); - assertEquals(future.get().getAuthentication(), token.getAuthentication()); + assertAuthentication(future.get().getAuthentication(), authentication); } } public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception {
TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - UserToken expired = new UserToken(authentication, Instant.now().minus(3L, ChronoUnit.DAYS)); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken expired = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + Instant.now().minus(3L, ChronoUnit.DAYS), Collections.emptyMap()); mockGetTokenFromId(expired, false); - String userTokenString = tokenService.getAccessTokenAsString(expired); + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); PlainActionFuture<Tuple<Authentication, Map<String, Object>>> authFuture = new PlainActionFuture<>(); - tokenService.getAuthenticationAndMetaData(userTokenString, authFuture); + tokenService.getAuthenticationAndMetaData(accessToken, authFuture); Authentication retrievedAuth = authFuture.actionGet().v1(); - assertEquals(authentication, retrievedAuth); + assertAuthentication(authentication, retrievedAuth); + } + + public void testSupersedingTokenEncryption() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture<Tuple<String, String>> tokenFuture = new PlainActionFuture<>(); + final String refreshToken = UUIDs.randomBase64UUID(); + final String newAccessToken = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + final byte[] iv = tokenService.getRandomBytes(TokenService.IV_BYTES); + final byte[] salt = tokenService.getRandomBytes(TokenService.SALT_BYTES); + final Version version = tokenService.getTokenVersionCompatibility(); + String encryptedTokens = tokenService.encryptSupersedingTokens(newAccessToken, newRefreshToken, refreshToken, iv, + salt); + TokenService.RefreshTokenStatus refreshTokenStatus = new TokenService.RefreshTokenStatus(false, + authentication.getUser().principal(), authentication.getAuthenticatedBy().getName(), true, Instant.now().minusSeconds(5L), + encryptedTokens, Base64.getEncoder().encodeToString(iv), Base64.getEncoder().encodeToString(salt)); + refreshTokenStatus.setVersion(version); + tokenService.decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, tokenFuture); + if (version.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + // previous versions serialized the access token encrypted and the cipher text was different each time (due to different IVs) + assertThat(tokenService.prependVersionAndEncodeAccessToken(version, newAccessToken), equalTo(tokenFuture.get().v1())); + } assertThat(TokenService.prependVersionAndEncodeRefreshToken(version, newRefreshToken), equalTo(tokenFuture.get().v2())); } public void testCannotValidateTokenIfLicenseDoesNotAllowTokens() throws Exception { when(licenseState.isTokenServiceAllowed()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - UserToken token = new UserToken(authentication, Instant.now().plusSeconds(180)); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken token = new UserToken(userTokenId,
tokenService.getTokenVersionCompatibility(), authentication, + Instant.now().plusSeconds(180), Collections.emptyMap()); mockGetTokenFromId(token, false); - + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(threadContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(threadContext, accessToken); PlainActionFuture<UserToken> authFuture = new PlainActionFuture<>(); when(licenseState.isTokenServiceAllowed()).thenReturn(false); @@ -660,18 +726,30 @@ private TokenService createTokenService(Settings settings, Clock clock) throws G return new TokenService(settings, clock, client, licenseState, securityMainIndex, securityTokensIndex, clusterService); } - private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { - mockGetTokenFromId(userToken, isExpired, client); + private void mockGetTokenFromId(TokenService tokenService, String userTokenId, Authentication authentication, boolean isExpired) { + mockGetTokenFromId(tokenService, userTokenId, authentication, isExpired, client); } - public static void mockGetTokenFromId(UserToken userToken, boolean isExpired, Client client) { + public static void mockGetTokenFromId(TokenService tokenService, String userTokenId, Authentication authentication, boolean isExpired, + Client client) { doAnswer(invocationOnMock -> { GetRequest request = (GetRequest) invocationOnMock.getArguments()[0]; ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[1]; GetResponse response = mock(GetResponse.class); - if (userToken.getId().equals(request.id().replace("token_", ""))) { + Version tokenVersion = tokenService.getTokenVersionCompatibility(); + final String possiblyHashedUserTokenId; + if (tokenVersion.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + possiblyHashedUserTokenId = TokenService.hashTokenString(userTokenId); + } else { + possiblyHashedUserTokenId = userTokenId; + } + if (possiblyHashedUserTokenId.equals(request.id().replace("token_", ""))) { when(response.isExists()).thenReturn(true); Map<String, Object> sourceMap = new HashMap<>(); + final Authentication tokenAuth = new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), + authentication.getLookedUpBy(), tokenVersion, AuthenticationType.TOKEN, authentication.getMetadata()); + final UserToken userToken = new UserToken(possiblyHashedUserTokenId, tokenVersion, tokenAuth, + tokenService.getExpirationTime(), authentication.getMetadata()); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); Map<String, Object> accessTokenMap = new HashMap<>(); @@ -687,35 +765,42 @@ public static void mockGetTokenFromId(UserToken userToken, boolean isExpired, Cl }).when(client).get(any(GetRequest.class), any(ActionListener.class)); } + private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { + doAnswer(invocationOnMock -> { + GetRequest request = (GetRequest) invocationOnMock.getArguments()[0]; + ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[1]; + GetResponse response = mock(GetResponse.class); + final String possiblyHashedUserTokenId; + if (userToken.getVersion().onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + possiblyHashedUserTokenId = TokenService.hashTokenString(userToken.getId()); + } else {
+ possiblyHashedUserTokenId = userToken.getId(); + } + if (possiblyHashedUserTokenId.equals(request.id().replace("token_", ""))) { + when(response.isExists()).thenReturn(true); + Map<String, Object> sourceMap = new HashMap<>(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); + Map<String, Object> accessTokenMap = new HashMap<>(); + Map<String, Object> userTokenMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), + Strings.toString(builder), false); + userTokenMap.put("id", possiblyHashedUserTokenId); + accessTokenMap.put("user_token", userTokenMap); + accessTokenMap.put("invalidated", isExpired); + sourceMap.put("access_token", accessTokenMap); + } + when(response.getSource()).thenReturn(sourceMap); + } + listener.onResponse(response); + return Void.TYPE; + }).when(client).get(any(GetRequest.class), any(ActionListener.class)); + } + public static void assertAuthentication(Authentication result, Authentication expected) { assertEquals(expected.getUser(), result.getUser()); assertEquals(expected.getAuthenticatedBy(), result.getAuthenticatedBy()); assertEquals(expected.getLookedUpBy(), result.getLookedUpBy()); assertEquals(expected.getMetadata(), result.getMetadata()); - assertEquals(AuthenticationType.TOKEN, result.getAuthenticationType()); - } - - protected String getDeprecatedAccessTokenString(TokenService tokenService, UserToken userToken) throws IOException, - GeneralSecurityException { - try (ByteArrayOutputStream os = new ByteArrayOutputStream(TokenService.MINIMUM_BASE64_BYTES); - OutputStream base64 = Base64.getEncoder().wrap(os); - StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(Version.V_7_0_0); - TokenService.KeyAndCache keyAndCache = tokenService.getActiveKeyCache(); - Version.writeVersion(Version.V_7_0_0, out); - out.writeByteArray(keyAndCache.getSalt().bytes); - out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = tokenService.getNewInitializationVector(); - out.writeByteArray(initializationVector); - try (CipherOutputStream encryptedOutput = - new CipherOutputStream(out, tokenService.getEncryptionCipher(initializationVector, keyAndCache, Version.V_7_0_0)); - StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { - encryptedStreamOutput.setVersion(Version.V_7_0_0); - encryptedStreamOutput.writeString(userToken.getId()); - encryptedStreamOutput.close(); - return new String(os.toByteArray(), StandardCharsets.UTF_8); - } - } } private DiscoveryNode addAnotherDataNodeWithVersion(ClusterService clusterService, Version version) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java index 6086dc642d22f..e51945cd90418 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java @@ -50,6 +50,10 @@ public void testSSHA256SelfGenerated() throws Exception { testHasherSelfGenerated(Hasher.SSHA256); } + public void testSHA256SelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.SHA256); + } + public void testNoopSelfGenerated() throws Exception { testHasherSelfGenerated(Hasher.NOOP); } From e700c4c374b2e0a1d4ea81047e3ba14de4680572 Mon Sep 17 00:00:00 2001 From: Russ Cam Date: Mon, 20 May 2019 06:07:28
-0400 Subject: [PATCH 126/321] Remove parent query string parameter (#41098) This commit removes the deprecated parent query string parameter. The routing parameter should be used instead. --- .../src/main/resources/rest-api-spec/api/create.json | 4 ---- .../src/main/resources/rest-api-spec/api/delete.json | 4 ---- .../src/main/resources/rest-api-spec/api/exists.json | 4 ---- .../src/main/resources/rest-api-spec/api/exists_source.json | 4 ---- .../src/main/resources/rest-api-spec/api/explain.json | 4 ---- rest-api-spec/src/main/resources/rest-api-spec/api/get.json | 4 ---- .../src/main/resources/rest-api-spec/api/get_source.json | 4 ---- .../src/main/resources/rest-api-spec/api/index.json | 4 ---- .../src/main/resources/rest-api-spec/api/mtermvectors.json | 5 ----- .../src/main/resources/rest-api-spec/api/termvectors.json | 5 ----- .../src/main/resources/rest-api-spec/api/update.json | 4 ---- 11 files changed, 46 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index f21d2606364d1..65fcf02807ba1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -33,10 +33,6 @@ "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, - "parent": { - "type" : "string", - "description" : "ID of the parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 792f9d89609bf..0152374028832 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -33,10 +33,6 @@ "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, - "parent": { - "type" : "string", - "description" : "ID of parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json index 3debd3edce585..2a451344521e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json @@ -33,10 +33,6 @@ "type": "list", "description" : "A comma-separated list of stored fields to return in the response" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index 89f9c33e5fb44..30e56141ec001 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -30,10 +30,6 @@ } }, "params": { - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 12aa7a8dca942..203ef23c9cc10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -55,10 +55,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 5b2203c94deb9..f4e0fdd5f90ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -33,10 +33,6 @@ "type": "list", "description" : "A comma-separated list of stored fields to return in the response" }, - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index a26691edc41fc..d6f6964aa7c36 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -30,10 +30,6 @@ } }, "params": { - "parent": { - "type" : "string", - "description" : "The ID of the parent document" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: 
random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 2a2053d2250a0..438032980a3c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -43,10 +43,6 @@ "default" : "index", "description" : "Explicit operation type" }, - "parent": { - "type" : "string", - "description" : "ID of the parent document" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json index ac73f84e30d6d..aaff8e73259cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json @@ -62,11 +62,6 @@ "description" : "Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", "required" : false }, - "parent" : { - "type" : "string", - "description" : "Parent id of documents. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "required" : false - }, "realtime": { "type": "boolean", "description": "Specifies if requests are real-time as opposed to near-real-time (default: true).", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json index 0570433507055..bbbdc7c87ad0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json @@ -62,11 +62,6 @@ "description" : "Specific routing value.", "required" : false }, - "parent": { - "type" : "string", - "description" : "Parent id of documents.", - "required" : false - }, "realtime": { "type": "boolean", "description": "Specifies if request is real-time as opposed to near-real-time (default: true).", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index b85c70be57d9e..02435190674cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -49,10 +49,6 @@ "type": "string", "description": "The script language (default: painless)" }, - "parent": { - "type": "string", - "description": "ID of the parent document. Is is only used for routing and when for the upsert request" - }, "refresh": { "type" : "enum", "options": ["true", "false", "wait_for"], From 18f3b27e8f73ed8a8125af180ab38055fd7ae8d6 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 20 May 2019 13:47:58 +0300 Subject: [PATCH 127/321] OpenID Connect realm guide (#41423) This commit adds a configuration guide for the newly introduced OpenID Connect realm. The guide is similar to the style of the SAML Guide and shares certain parts where applicable (role mapping) It also contains a short section on how the realm can be used for authenticating users without Kibana. 
Co-Authored-By: Lisa Cawley --- x-pack/docs/build.gradle | 14 + x-pack/docs/en/rest-api/security.asciidoc | 4 +- .../rest-api/security/authenticate.asciidoc | 4 +- .../security/oidc-authenticate-api.asciidoc | 2 +- .../security/oidc-logout-api.asciidoc | 2 +- .../oidc-prepare-authentication-api.asciidoc | 20 +- .../authentication/oidc-guide.asciidoc | 649 ++++++++++++++++++ 7 files changed, 679 insertions(+), 16 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/oidc-guide.asciidoc diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index d7517d007d7c8..0075b4989e69f 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -73,6 +73,7 @@ File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/reso project.copyRestSpec.from(xpackResources) { include 'rest-api-spec/api/**' } +File jwks = new File(xpackProject('test:idp-fixture').projectDir, 'oidc/op-jwks.json') integTestCluster { setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' @@ -81,9 +82,22 @@ integTestCluster { setting 'xpack.monitoring.exporters._local.type', 'local' setting 'xpack.monitoring.exporters._local.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.authc.realms.file.file.order', '0' + setting 'xpack.security.authc.realms.native.native.order', '1' + setting 'xpack.security.authc.realms.oidc.oidc1.order', '2' + setting 'xpack.security.authc.realms.oidc.oidc1.op.issuer', 'http://127.0.0.1:8080' + setting 'xpack.security.authc.realms.oidc.oidc1.op.authorization_endpoint', "http://127.0.0.1:8080/c2id-login" + setting 'xpack.security.authc.realms.oidc.oidc1.op.token_endpoint', "http://127.0.0.1:8080/c2id/token" + setting 'xpack.security.authc.realms.oidc.oidc1.op.jwkset_path', 'op-jwks.json' + setting 'xpack.security.authc.realms.oidc.oidc1.rp.redirect_uri', 'https://my.fantastic.rp/cb' + setting 'xpack.security.authc.realms.oidc.oidc1.rp.client_id', 'elasticsearch-rp' + keystoreSetting 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' + setting 'xpack.security.authc.realms.oidc.oidc1.rp.response_type', 'id_token' + setting 'xpack.security.authc.realms.oidc.oidc1.claims.principal', 'sub' setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' waitCondition = waitWithAuth + extraConfigFile 'op-jwks.json', jwks } diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index c04bae90801ee..abad1e38d77fd 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -76,6 +76,8 @@ native realm: * <> * <> +[float] +[[security-openid-apis]] === OpenID Connect You can use the following APIs to authenticate users against an OpenID Connect @@ -110,7 +112,7 @@ include::security/get-users.asciidoc[] include::security/has-privileges.asciidoc[] include::security/invalidate-api-keys.asciidoc[] include::security/invalidate-tokens.asciidoc[] -include::security/ssl.asciidoc[] include::security/oidc-prepare-authentication-api.asciidoc[] include::security/oidc-authenticate-api.asciidoc[] include::security/oidc-logout-api.asciidoc[] +include::security/ssl.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/authenticate.asciidoc b/x-pack/docs/en/rest-api/security/authenticate.asciidoc index 51b0d64419453..d23c410a62389 100644 --- 
a/x-pack/docs/en/rest-api/security/authenticate.asciidoc +++ b/x-pack/docs/en/rest-api/security/authenticate.asciidoc @@ -46,11 +46,11 @@ The following example output provides information about the "rdeniro" user: "metadata": { }, "enabled": true, "authentication_realm": { - "name" : "default_file", + "name" : "file", "type" : "file" }, "lookup_realm": { - "name" : "default_file", + "name" : "file", "type" : "file" } } diff --git a/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc index 0efb2b23145f7..bc60e4fbf231d 100644 --- a/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc +++ b/x-pack/docs/en/rest-api/security/oidc-authenticate-api.asciidoc @@ -51,7 +51,7 @@ POST /_security/oidc/authenticate } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] +// TEST[catch:unauthorized] The following example output contains the access token that was generated in response, the amount of time (in seconds) that the token expires in, the type, and the refresh token: diff --git a/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc index 6f5288a135f2a..cb8840ca53590 100644 --- a/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc +++ b/x-pack/docs/en/rest-api/security/oidc-logout-api.asciidoc @@ -39,7 +39,7 @@ POST /_security/oidc/logout } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] +// TEST[catch:unauthorized] The following example output of the response contains the URI pointing to the End Session Endpoint of the OpenID Connect Provider with all the parameters of the Logout Request, as HTTP GET parameters diff --git a/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc b/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc index aeb400ce97ef1..a6ce410be6ee6 100644 --- a/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc +++ b/x-pack/docs/en/rest-api/security/oidc-prepare-authentication-api.asciidoc @@ -57,20 +57,19 @@ POST /_security/oidc/prepare } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] - The following example output of the response contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request, as HTTP GET parameters [source,js] -------------------------------------------------- { - "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=0o43gasov3TxMWJOt839", + "redirect" : "http://127.0.0.1:8080/c2id-login?scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=elasticsearch-rp", "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM" } -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE[s/4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I/\$\{body.state\}/] +// 
TESTRESPONSE[s/WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM/\$\{body.nonce\}/] The following example generates an authentication request for the OpenID Connect Realm `oidc1`, where the values for the state and the nonce have been generated by the client @@ -85,7 +84,6 @@ POST /_security/oidc/prepare } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] The following example output of the response contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request, as HTTP GET parameters @@ -93,12 +91,12 @@ OpenID Connect Provider with all the parameters of the Authentication Request, a [source,js] -------------------------------------------------- { - "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=0o43gasov3TxMWJOt839", + "redirect" : "http://127.0.0.1:8080/c2id-login?scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=elasticsearch-rp", "state" : "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", "nonce" : "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5" } -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE The following example generates an authentication request for a 3rd party initiated single sign on, specifying the issuer that should be used for matching the appropriate OpenID Connect Authentication realm @@ -107,12 +105,11 @@ issuer that should be used for matching the appropriate OpenID Connect Authentic -------------------------------------------------- POST /_security/oidc/prepare { - "issuer" : "https://op-issuer.org:8800", + "iss" : "http://127.0.0.1:8080", "login_hint": "this_is_an_opaque_string" } -------------------------------------------------- // CONSOLE -// TEST[skip:These are properly tested in the OpenIDConnectIT suite] The following example output of the response contains the URI pointing to the Authorization Endpoint of the OpenID Connect Provider with all the parameters of the Authentication Request, as HTTP GET parameters @@ -120,9 +117,10 @@ OpenID Connect Provider with all the parameters of the Authentication Request, a [source,js] -------------------------------------------------- { - "redirect" : "https://op-provider.org/login?scope=openid&response_type=code&redirect_uri=http%3A%2F%2Foidc-kibana.elastic.co%3A5603%2Fkmi%2Fapi%2Fsecurity%2Fv1%2Foidc&state=lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO&nonce=zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5&client_id=0o43gasov3TxMWJOt839&login_hint=this_is_an_opaque_string", + "redirect" : "http://127.0.0.1:8080/c2id-login?login_hint=this_is_an_opaque_string&scope=openid&response_type=id_token&redirect_uri=https%3A%2F%2Fmy.fantastic.rp%2Fcb&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I&nonce=WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM&client_id=elasticsearch-rp", "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM" } -------------------------------------------------- -// NOTCONSOLE \ No newline at end of file +// TESTRESPONSE[s/4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I/\$\{body.state\}/] +// 
TESTRESPONSE[s/WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM/\$\{body.nonce\}/] \ No newline at end of file diff --git a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc new file mode 100644 index 0000000000000..df5ce11c63c14 --- /dev/null +++ b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc @@ -0,0 +1,649 @@ +[role="xpack"] +[[oidc-guide]] + +== Configuring single sign-on to the {stack} using OpenID Connect + +The Elastic Stack supports single sign-on (SSO) using OpenID Connect via {kib}, with +{es} as the backend service that holds most of the functionality. {kib} and {es} +together represent an OpenID Connect Relying Party (RP) that supports the Authorization +Code Flow as defined in the OpenID Connect specification. + +This guide assumes that you have an OpenID Connect Provider where the +Elastic Stack Relying Party will be registered. + +NOTE: The OpenID Connect realm support in {kib} is designed with the expectation that it +will be the primary authentication method for the users of that {kib} instance. The +<<oidc-kibana>> section describes what this entails and how you can set it up to support +other realms if necessary. + +[[oidc-guide-op]] +=== The OpenID Connect Provider + +The OpenID Connect Provider (OP) is the entity in OpenID Connect that is responsible for +authenticating the user and for granting the necessary tokens with the authentication and +user information to be consumed by the Relying Parties. + +In order for the Elastic Stack to be able to use your OpenID Connect Provider for authentication, +a trust relationship needs to be established between the OP and the RP. In the OpenID Connect +Provider, this means registering the RP as a client. OpenID Connect defines a dynamic client +registration protocol but this is usually geared towards real-time client registration and +not the trust establishment process for cross security domain single sign-on. All OPs will +also allow for the manual registration of an RP as a client, via a user interface or (less often) +via the consumption of a metadata document. + +The process for registering the Elastic Stack RP will be different from OP to OP and following +the provider's relevant documentation is prudent. The information for the +RP that you commonly need to provide for registration is the following: + +- `Relying Party Name`: An arbitrary identifier for the relying party. Neither the specification +nor the Elastic Stack implementation impose any constraints on this value. +- `Redirect URI`: This is the URI where the OP will redirect the user's browser after authentication. The +appropriate value for this will depend on your setup and whether or not {kib} sits behind a proxy or +load balancer. It will typically be +$\{kibana-url}/api/security/v1/oidc+ where _$\{kibana-url}_ +is the base URL for your {kib} instance. You might also see this called `Callback URI`. + +At the end of the registration process, the OP will assign a Client Identifier and a Client Secret for the RP ({stack}) to use. +Note these two values as they will be used in the {es} configuration. + +[[oidc-guide-authentication]] +=== Configure {es} for OpenID Connect authentication + +The following is a summary of the configuration steps required in order to enable authentication +using OpenID Connect in {es}: + +. <<oidc-enable-http,Enable TLS for HTTP>> +. <<oidc-enable-token,Enable the token service>> +. <<oidc-create-realm,Create an OpenID Connect realm>> +.
<<oidc-role-mapping,Configure role mappings>> + +[[oidc-enable-http]] +==== Enable TLS for HTTP + +If your {es} cluster is operating in production mode, then you must +configure the HTTP interface to use SSL/TLS before you can enable OpenID Connect +authentication. + +For more information, see +{ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. + +[[oidc-enable-token]] +==== Enable the token service + +The {es} OpenID Connect implementation makes use of the {es} Token Service. This service +is automatically enabled if you configure TLS on the HTTP interface, and can be +explicitly configured by including the following in your `elasticsearch.yml` file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.token.enabled: true +------------------------------------------------------------ + +[[oidc-create-realm]] +==== Create an OpenID Connect realm + +OpenID Connect based authentication is enabled by configuring the appropriate realm within +the authentication chain for {es}. + +This realm has a few mandatory settings, and a number of optional settings. +The available settings are described in detail in the +{ref}/security-settings.html#ref-oidc-settings[Security settings in {es}]. This +guide will explore the most common settings. + +Create an OpenID Connect realm (the realm type is `oidc`) in your `elasticsearch.yml` file, +similar to what is shown below: + +NOTE: The values used below are meant to be an example and are not intended to apply to +every use case. The details below the configuration snippet provide insights and suggestions +to help you pick the proper values, depending on your OP configuration. + +[source, yaml] +------------------------------------------------------------------------------------- +xpack.security.authc.realms.oidc.oidc1: + order: 2 + rp.client_id: "the_client_id" + rp.response_type: code + rp.redirect_uri: "https://kibana.example.org:5601/api/security/v1/oidc" + op.authorization_endpoint: "https://op.example.org/oauth2/v1/authorize" + op.token_endpoint: "https://op.example.org/oauth2/v1/token" + op.userinfo_endpoint: "https://op.example.org/oauth2/v1/userinfo" + op.endsession_endpoint: "https://op.example.org/oauth2/v1/logout" + op.issuer: "https://op.example.org" + op.jwkset_path: oidc/jwkset.json + claims.principal: sub + claims.groups: "http://example.info/claims/groups" +------------------------------------------------------------------------------------- + +The configuration values used in the example above are: + +xpack.security.authc.realms.oidc.oidc1:: + This defines a new `oidc` authentication realm named "oidc1". + See <<realms>> for more explanation of realms. + +order:: + You should define a unique order on each realm in your authentication chain. + It is recommended that the OpenID Connect realm be at the bottom of your authentication + chain (that is, that it has the _highest_ order). + +rp.client_id:: + This arbitrary, usually opaque, string is the Client Identifier that was assigned to the Elastic Stack RP by the OP upon + registration. + +rp.response_type:: + This is an identifier that controls which OpenID Connect authentication flow this RP supports and also + which flow this RP requests the OP should follow. Supported values are: + - `code`, which means that the RP wants to use the Authorization Code flow. If your OP supports the + Authorization Code flow, you should select this instead of the Implicit Flow.
+ - `id_token token` which means that the RP wants to use the Implicit flow and we also request an OAuth2 + access token from the OP, which we can potentially use for follow-up requests (UserInfo). This + should be selected if the OP offers a UserInfo endpoint in its configuration, or if you know that + the claims you will need to use for role mapping are not available in the ID Token. + - `id_token` which means that the RP wants to use the Implicit flow, but is not interested in getting + an OAuth2 token too. Select this if you are certain that all necessary claims will be contained in + the ID Token or if the OP doesn't offer a UserInfo endpoint. + +rp.redirect_uri:: + The redirect URI where the OP will redirect the browser after authentication. This needs to be + _exactly_ the same as the one <<oidc-guide-op,provided during registration in the OP>> and will + typically be +$\{kibana-url}/api/security/v1/oidc+ where _$\{kibana-url}_ is the base URL for your {kib} instance. + +op.authorization_endpoint:: + The URL for the Authorization Endpoint in the OP. This is where the user's browser + will be redirected to start the authentication process. The value for this setting should be provided by your + OpenID Connect Provider. + +op.token_endpoint:: + The URL for the Token Endpoint in the OpenID Connect Provider. This is the endpoint where + {es} will send a request to exchange the code for an ID Token, in the case where the Authorization Code + flow is used. The value for this setting should be provided by your OpenID Connect Provider. + +op.userinfo_endpoint:: + (Optional) The URL for the UserInfo Endpoint in the OpenID Connect Provider. This is the endpoint of the OP that + can be queried to get further user information, if required. The value for this setting should be provided by your + OpenID Connect Provider. + +op.endsession_endpoint:: + (Optional) The URL to the End Session Endpoint in the OpenID Connect Provider. This is the endpoint where the user's + browser will be redirected after local logout, if the realm is configured for RP initiated Single Logout and + the OP supports it. The value for this setting should be provided by your OpenID Connect Provider. + +op.jwkset_path:: + The path to a file containing a JSON Web Key Set with the key material that the OpenID Connect + Provider uses for signing tokens and claims responses. The path is resolved relative to the {es} + config directory. + {es} will automatically monitor this file for changes and will reload the configuration whenever + it is updated. Your OpenID Connect Provider should provide you with this file. + +claims.principal:: See <<oidc-claims-mapping>>. +claims.groups:: See <<oidc-claims-mapping>>. + +A final piece of configuration of the OpenID Connect realm is to set the `Client Secret` that was assigned +to the RP during registration in the OP. This is a secure setting and as such is not defined in the realm +configuration in `elasticsearch.yml` but added to the {ref}/secure-settings.html[elasticsearch keystore]. +For instance: + + +[source,sh] +---- +bin/elasticsearch-keystore add xpack.security.authc.realms.oidc.oidc1.rp.client_secret +---- + + +NOTE: According to the OpenID Connect specification, the OP should also make their configuration +available at a well-known URL, which is the concatenation of their `Issuer` value with the +`.well-known/openid-configuration` string. For example: `https://op.org.com/.well-known/openid-configuration`. +That document should contain all the necessary information to configure the OpenID Connect realm in {es}.
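+
+For example, assuming the example issuer used in the realm configuration above
+(`https://op.example.org`, a placeholder rather than a real provider), you could
+retrieve that discovery document with a plain HTTP GET:
+
+[source,sh]
+----
+curl https://op.example.org/.well-known/openid-configuration
+----
+
+The JSON response contains, among other things, the `authorization_endpoint`,
+`token_endpoint` and `jwks_uri` values, which you can use to fill in the
+corresponding `op.*` settings of the realm.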
+
+
+[[oidc-claims-mapping]]
+==== Claims mapping
+
+===== Claims and scopes
+
+When authenticating to {kib} using OpenID Connect, the OP will provide information about the user
+in the form of OpenID Connect claims, which can be included either in the ID Token or be retrieved from the
+UserInfo endpoint of the OP. A claim is defined as a piece of information asserted by the OP
+about the authenticated user. Simply put, a claim is a name/value pair that contains information about
+the user. Related to claims, we also have the notion of OpenID Connect scopes. Scopes are identifiers
+that are used to request access to specific lists of claims. The standard defines a set of scope
+identifiers that can be requested. The only mandatory one is `openid`, while commonly used ones are
+`profile` and `email`. The `profile` scope requests access to the `name`,`family_name`,`given_name`,`middle_name`,`nickname`,
+`preferred_username`,`profile`,`picture`,`website`,`gender`,`birthdate`,`zoneinfo`,`locale`, and `updated_at` claims.
+The `email` scope requests access to the `email` and `email_verified` claims. The process is that
+the RP requests specific scopes during the authentication request. If the OP Privacy Policy
+allows it and the authenticating user consents to it, the related claims are returned to the
+RP (either in the ID Token or as a UserInfo response).
+
+The list of supported claims will vary depending on the OP you are using, but you can expect
+the https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[Standard Claims] to be
+largely supported.
+
+[[oidc-claim-to-property]]
+===== Mapping claims to user properties
+
+The goal of claims mapping is to configure {es} in such a way as to be able to map the values of
+specified returned claims to one of the <> that are supported
+by {es}. These user properties are then utilized to identify the user in the {kib} UI or the audit
+logs, and can also be used to create <> rules.
+
+The recommended steps for configuring OpenID Connect claims mapping are as follows:
+
+. Consult your OP configuration to see what claims it might support. Note that
+  the list provided in the OP's metadata or in the configuration page of the OP
+  is a list of potentially supported claims. However, for privacy reasons it might
+  not be complete, or not all supported claims will be available for all
+  authenticated users.
+
+. Read through the list of <> that {es}
+  supports, and decide which of them are useful to you and can be provided by
+  your OP in the form of claims. At a _minimum_, the `principal` user property
+  is required.
+
+. Configure your OP to "release" those claims to your {stack} Relying
+  Party. This process varies greatly by provider. Some providers allow a static
+  configuration, while others support the RP requesting, at authentication time, the
+  scopes that correspond to the claims to be "released". See
+  {ref}/security-settings.html#ref-oidc-settings[`rp.requested_scopes`] for details about how
+  to configure the scopes to request. To ensure interoperability and minimize
+  errors, you should only request scopes that the OP supports and which you
+  intend to map to {es} user properties.
+
+. Configure the OpenID Connect realm in {es} to associate the {es} user properties (see
+  <> below) with the names of the claims that your
+  OP will release. In the example above, we have configured the `principal` and
+  `groups` user properties as follows:
+
+  .. `claims.principal: sub` : This instructs {es} to look for the OpenID Connect claim named `sub`
+     in the ID Token that the OP issued for the user (or in the UserInfo response) and assign the
+     value of this claim to the `principal` user property. `sub` is a commonly used claim for the
+     principal property as it is an identifier of the user in the OP and it is also a required
+     claim of the ID Token, thus offering guarantees that it will be available. It is, however,
+     only used as an example here; the OP may provide another claim that is a better fit for your needs.
+
+  .. `claims.groups: "http://example.info/claims/groups"` : Similarly, this instructs {es} to look
+     for the claim with the name `http://example.info/claims/groups` (note that this is a URI - an
+     identifier, treated as a string and not a URL pointing to a location that will be retrieved)
+     either in the ID Token or in the UserInfo response, and map the value(s) of it to the user
+     property `groups` in {es}. There is no standard claim in the specification that is used for
+     expressing roles or group memberships of the authenticated user in the OP, so the name of the
+     claim that should be mapped here will vary greatly between providers. Consult your OP
+     documentation for more details.
+
+[[oidc-user-properties]]
+===== {es} user properties
+
+The {es} OpenID Connect realm can be configured to map OpenID Connect claims to the
+following properties on the authenticated user:
+
+principal:: _(Required)_
+  This is the _username_ that will be applied to a user that authenticates
+  against this realm.
+  The `principal` appears in places such as the {es} audit logs.
+
+NOTE: If the principal property fails to be mapped from a claim, the authentication fails.
+
+groups:: _(Recommended)_
+  If you wish to use your OP's concept of groups or roles as the basis for a
+  user's {es} privileges, you should map them with this property.
+  The `groups` are passed directly to your <>.
+
+name:: _(Optional)_ The user's full name.
+mail:: _(Optional)_ The user's email address.
+dn:: _(Optional)_ The user's X.500 _Distinguished Name_.
+
+
+===== Extracting partial values from OpenID Connect claims
+
+There are some occasions where the value of a claim may contain more information
+than you wish to use within {es}. A common example of this is one where the
+OP works exclusively with email addresses, but you would like the user's
+`principal` to use the _local-part_ of the email address.
+For example, if their email address was `james.wong@staff.example.com`, then you
+would like their principal to simply be `james.wong`.
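+
+Such an extraction boils down to a regular expression with a single capture group, and it is
+worth sanity-checking a candidate pattern before configuring it in the realm. The one-liner
+below is illustrative only; it is not an {es} command and assumes a `sed` implementation that
+supports the `-E` flag:
+
+[source,sh]
+----
+# Prints "james.wong" only when the whole address matches the pattern
+echo 'james.wong@staff.example.com' | sed -nE 's/^([^@]+)@staff\.example\.com$/\1/p'
+----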
+
+This can be achieved using the `claim_patterns` setting in the {es}
+realm, as demonstrated in the realm configuration below:
+
+[source, yaml]
+-------------------------------------------------------------------------------------
+xpack.security.authc.realms.oidc.oidc1:
+  rp.client_id: "the_client_id"
+  rp.response_type: code
+  rp.redirect_uri: "https://kibana.example.org:5601/api/security/v1/oidc"
+  op.authorization_endpoint: "https://op.example.org/oauth2/v1/authorize"
+  op.token_endpoint: "https://op.example.org/oauth2/v1/token"
+  op.userinfo_endpoint: "https://op.example.org/oauth2/v1/userinfo"
+  op.endsession_endpoint: "https://op.example.org/oauth2/v1/logout"
+  op.issuer: "https://op.example.org"
+  op.jwkset_path: oidc/jwkset.json
+  claims.principal: email_verified
+  claim_patterns.principal: "^([^@]+)@staff\\.example\\.com$"
+-------------------------------------------------------------------------------------
+
+In this case, the user's `principal` is mapped from the `email_verified` claim, but a
+regular expression is applied to the value before it is assigned to the user.
+If the regular expression matches, then the result of the first group is used as the
+effective value. If the regular expression does not match, then the claim
+mapping fails.
+
+In this example, the email address must belong to the `staff.example.com` domain,
+and then the local-part (anything before the `@`) is used as the principal.
+Any users who try to log in using a different email domain will fail because the
+regular expression will not match against their email address, and thus their
+principal user property - which is mandatory - will not be populated.
+
+IMPORTANT: Small mistakes in these regular expressions can have significant
+security consequences. For example, if we accidentally left off the trailing
+`$` from the example above, then we would match any email address where the
+domain starts with `staff.example.com`, and this would accept an email
+address such as `admin@staff.example.com.attacker.net`. It is important that
+you make sure your regular expressions are as precise as possible so that
+you do not inadvertently open an avenue for user impersonation attacks.
+
+[[third-party-login]]
+==== Third party initiated single sign-on
+
+The OpenID Connect realm in {es} supports third party initiated login as described in the
+https://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin[relevant specification].
+
+This allows the OP itself, or a third party other than the RP, to initiate the authentication
+process while requesting the OP to be used for the authentication. Please note that the Elastic
+Stack RP should already be configured for this OP in order for this process to succeed.
+
+
+[[oidc-logout]]
+==== OpenID Connect Logout
+
+The OpenID Connect realm in {es} supports RP-Initiated Logout Functionality as
+described in the
+https://openid.net/specs/openid-connect-session-1_0.html#RPLogout[relevant part of the specification].
+
+In this process, the OpenID Connect RP (the Elastic Stack in this case) will redirect the user's
+browser to a predefined URL of the OP after successfully completing a local logout. The OP can then
+also log out the user, depending on the configuration, and should finally redirect the user back to the
+RP. The `op.endsession_endpoint` in the realm configuration determines the URL in the OP that the browser
+will be redirected to. The `rp.post_logout_redirect_uri` setting determines the URL to redirect
+the user back to after the OP logs them out.
+
+When configuring `rp.post_logout_redirect_uri`, care should be taken to not point this to a URL that
+will trigger re-authentication of the user. For instance, when using OpenID Connect to support
+single sign-on to {kib}, this could be set to +$\{kibana-url}/logged_out+, which will show a user-
+friendly message to the user.
+
+[[oidc-role-mapping]]
+=== Configuring role mappings
+
+When a user authenticates using OpenID Connect, they are identified to the Elastic Stack,
+but this does not automatically grant them access to perform any actions or
+access any data.
+
+Your OpenID Connect users cannot do anything until they are assigned roles. This can be done
+through either the
+{ref}/security-api-put-role-mapping.html[add role mapping API] or with
+<>.
+
+NOTE: You cannot use {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files]
+to grant roles to users authenticating via OpenID Connect.
+
+This is an example of a simple role mapping that grants the `kibana_user` role
+to any user who authenticates against the `oidc1` OpenID Connect realm:
+
+[source,js]
+--------------------------------------------------
+PUT /_security/role_mapping/oidc-kibana
+{
+  "roles": [ "kibana_user" ],
+  "enabled": true,
+  "rules": {
+    "field": { "realm.name": "oidc1" }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+
+The user properties that are mapped via the realm configuration are used to process
+role mapping rules, and these rules determine which roles a user is granted.
+
+The user fields that are provided to the role
+mapping are derived from the OpenID Connect claims as follows:
+
+- `username`: The `principal` user property
+- `dn`: The `dn` user property
+- `groups`: The `groups` user property
+- `metadata`: See <>
+
+For more information, see <> and
+{ref}/security-api.html#security-role-mapping-apis[role mapping APIs].
+
+If your OP has the ability to provide groups or roles to RPs via the use of
+an OpenID Connect claim, then you should map this claim to the `claims.groups` setting in
+the {es} realm (see <>), and then make use of it in a role mapping
+as per the example below.
+
+This mapping grants the {es} `finance_data` role to any users who authenticate
+via the `oidc1` realm with the `finance-team` group membership.
+
+[source,js]
+--------------------------------------------------
+PUT /_security/role_mapping/oidc-finance
+{
+  "roles": [ "finance_data" ],
+  "enabled": true,
+  "rules": { "all": [
+        { "field": { "realm.name": "oidc1" } },
+        { "field": { "groups": "finance-team" } }
+  ] }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+If your users also exist in a repository that can be directly accessed by {es}
+(such as an LDAP directory), then you can use
+<> instead of role mappings.
+
+In this case, you perform the following steps:
+
+1. In your OpenID Connect realm, assign a claim to act as the lookup userid,
+   by configuring the `claims.principal` setting.
+2. Create a new realm that can look up users from your local repository (e.g. an
+   `ldap` realm).
+3. In your OpenID Connect realm, set `authorization_realms` to the name of the realm you
+   created in step 2.
+
+[[oidc-user-metadata]]
+=== User metadata
+
+By default, users who authenticate via OpenID Connect will have some additional metadata
+fields. These fields will include every OpenID Connect claim that is provided in the authentication response
+(regardless of whether it is mapped to an {es} user property). For example,
+in the metadata field `oidc(claim_name)`, "claim_name" is the name of the
+claim as it was contained in the ID Token or in the UserInfo response. Note that these will
+include all the https://openid.net/specs/openid-connect-core-1_0.html#IDToken[ID Token claims]
+that pertain to the authentication event, rather than the user themselves.
+
+This behaviour can be disabled by adding `populate_user_metadata: false` as
+a setting in the `oidc` realm.
+
+[[oidc-kibana]]
+=== Configuring {kib}
+
+OpenID Connect authentication in {kib} requires a small number of settings
+in addition to the standard {kib} security configuration. The
+{kibana-ref}/using-kibana-with-security.html[{kib} security documentation]
+provides details on the available configuration options that you can apply.
+
+In particular, since your {es} nodes have been configured to use TLS on the HTTP
+interface, you must configure {kib} to use a `https` URL to connect to {es}, and
+you may need to configure `elasticsearch.ssl.certificateAuthorities` to trust
+the certificates that {es} has been configured to use.
+
+OpenID Connect authentication in {kib} is also subject to the
+`xpack.security.sessionTimeout` setting that is described in the {kib} security
+documentation, and you may wish to adjust this timeout to meet your local needs.
+
+The three additional settings that are required for OpenID Connect support are shown below:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authProviders: [oidc]
+xpack.security.auth.oidc.realm: "oidc1"
+server.xsrf.whitelist: [/api/security/v1/oidc]
+------------------------------------------------------------
+
+The configuration values used in the example above are:
+
+`xpack.security.authProviders`::
+Set this to `[ oidc ]` to instruct {kib} to use OpenID Connect single sign-on as the
+authentication method. This instructs {kib} to attempt to initiate an SSO flow
+every time a user attempts to access a URL in {kib}, if the user is not already
+authenticated. If you also want to allow users to log in with a username and password,
+you must enable the `basic` authProvider too. For example:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authProviders: [oidc, basic]
+------------------------------------------------------------
+
+This will allow users that haven't already authenticated with OpenID Connect to
+navigate directly to the `/login` page in {kib} in order to use the login form.
+
+`xpack.security.auth.oidc.realm`::
+The name of the OpenID Connect realm in {es} that should handle authentication
+for this {kib} instance.
+
+`server.xsrf.whitelist`::
+{kib} has in-built protection against _Cross Site Request Forgery_ attacks, which
+is designed to prevent the {kib} server from processing requests that
+originated from outside the {kib} application.
+In order to support OpenID Connect messages that originate from your
+OP or a third party (see <>), you need to explicitly _whitelist_ the
+OpenID Connect authentication endpoint within {kib}, so that the {kib} server will
+not reject these external messages.
+
+
+=== OpenID Connect without {kib}
+
+The OpenID Connect realm is designed to allow users to authenticate to {kib} and as
+such, most of the guide above assumes that {kib} is used.
+
+This section describes how a custom web application could use the relevant OpenID
+Connect REST APIs in order to authenticate users to {es} with OpenID Connect.
+
+Single sign-on realms such as OpenID Connect and SAML make use of the Token Service in
+{es} and in principle exchange a SAML or OpenID Connect authentication response for
+an {es} access token and a refresh token. The access token is used as credentials for subsequent calls to {es}. The
+refresh token enables the user to get new {es} access tokens after the current one
+expires.
+
+NOTE: The {es} Token Service can be seen as a minimal OAuth2 authorization server
+and the access token and refresh token mentioned above are tokens that pertain
+_only_ to this authorization server. They are generated and consumed _only_ by {es}
+and are in no way related to the tokens (access token and ID Token) that the
+OpenID Connect Provider issues.
+
+==== Register the RP with an OpenID Connect Provider
+
+The Relying Party ({es} and the custom web app) will need to be registered as a
+client with the OpenID Connect Provider. Note that when registering the
+`Redirect URI`, it needs to be a URL in the custom web app.
+
+==== OpenID Connect Realm
+
+An OpenID Connect realm needs to be created and configured accordingly
+in {es}. See <>.
+
+==== Service Account user for accessing the APIs
+
+The realm is designed with the assumption that there needs to be a privileged entity
+acting as an authentication proxy. In this case, the custom web application is the
+authentication proxy handling the authentication of end users (more correctly,
+"delegating" the authentication to the OpenID Connect Provider). The OpenID Connect
+APIs require authentication and the necessary authorization level for the authenticated
+user. For this reason, a Service Account user needs to be created and assigned a role
+that gives them the `manage_oidc` cluster privilege. The use of the `manage_token`
+cluster privilege will be necessary after the authentication takes place, so that
+the user can maintain access or be subsequently logged out.
+
+[source,js]
+--------------------------------------------------
+POST /_security/role/facilitator-role
+{
+  "cluster" : ["manage_oidc", "manage_token"]
+}
+--------------------------------------------------
+// CONSOLE
+
+
+[source,js]
+--------------------------------------------------
+POST /_security/user/facilitator
+{
+  "password" : "",
+  "roles" : [ "facilitator-role"]
+}
+--------------------------------------------------
+// CONSOLE
+
+
+==== Handling the authentication flow
+
+At a high level, the custom web application would need to perform the following steps in order to
+authenticate a user with OpenID Connect:
+
+. Make an HTTP POST request to `_security/oidc/prepare`, authenticating as the `facilitator` user, using the name of the
+OpenID Connect realm in the {es} configuration in the request body. See the
+{ref}/security-api-oidc-prepare-authentication.html[OIDC Prepare Authentication API] for more details.
++
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/prepare
+{
+  "realm" : "oidc1"
+}
+--------------------------------------------------
+// CONSOLE
++
+. Handle the response to `/_security/oidc/prepare`. The response from {es} will contain 3 parameters:
+  `redirect`, `state`, `nonce`. The custom web application would need to store the values for `state`
+  and `nonce` in the user's session (client side in a cookie or server side if session information is
+  persisted this way) and redirect the user's browser to the URL that will be contained in the
+  `redirect` value.
+. Handle a subsequent response from the OP. After the user is successfully authenticated with the
+  OpenID Connect Provider, they will be redirected back to the callback/redirect URI. Upon receiving
+  this HTTP GET request, the custom web app will need to make an HTTP POST request to
+  `_security/oidc/authenticate`, again authenticating as the `facilitator` user, passing the URL
+  where the user's browser was redirected to as a parameter, along with the
+  values for `nonce` and `state` it had saved in the user's session previously.
+  See the {ref}/security-api-oidc-authenticate.html[OIDC Authenticate API] for more details.
++
+[source,js]
+-----------------------------------------------------------------------
+POST /_security/oidc/authenticate
+{
+  "redirect_uri" : "https://oidc-kibana.elastic.co:5603/api/security/v1/oidc?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
+  "state" : "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I",
+  "nonce" : "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM"
+}
+-----------------------------------------------------------------------
+// CONSOLE
+// TEST[catch:unauthorized]
++
+{es} will validate this and, if all is correct, will respond with an access token that can be used
+  as a `Bearer` token for subsequent requests, and a refresh token that can later be used to refresh the given
+  access token, as described in the {ref}/security-api-get-token.html[get token API].
+. At some point, if necessary, the custom web application can log the user out by using the
+  {ref}/security-api-oidc-logout.html[OIDC Logout API], passing the access token and refresh token as parameters. For example:
++
+[source,js]
+--------------------------------------------------
+POST /_security/oidc/logout
+{
+  "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==",
+  "refresh_token": "vLBPvmAB6KvwvJZr27cS"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[catch:unauthorized]
++
+If the realm is configured accordingly, this may result in a response with a `redirect` parameter indicating where
+the user needs to be redirected in the OP in order to complete the logout process.
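+
+Between authentication and logout, the access token is what the custom web application
+presents to {es} on behalf of the user. The following is a sketch only, reusing the
+placeholder token values from the examples above and a hypothetical {es} endpoint: a
+subsequent request authenticates with the `Bearer` scheme, and an expired access token can
+be refreshed through the {ref}/security-api-get-token.html[get token API]:
+
+[source,sh]
+----
+# Any {es} API call can now be made on behalf of the authenticated user
+curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" \
+  "https://elasticsearch.example.org:9200/_security/_authenticate"
+----
+
+[source,js]
+--------------------------------------------------
+POST /_security/oauth2/token
+{
+  "grant_type" : "refresh_token",
+  "refresh_token" : "vLBPvmAB6KvwvJZr27cS"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[catch:unauthorized]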
From cc988ce335257873027b70b8bab3f332be2a04d9 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 20 May 2019 13:26:21 +0100 Subject: [PATCH 128/321] [ML Data Frame] Start directly data frame rather than via the scheduler (#42067) Trigger indexer start directly to put the indexer in INDEXING state immediately --- .../client/DataFrameTransformIT.java | 4 ++- ...FrameTransformPersistentTasksExecutor.java | 24 ++++--------- .../transforms/DataFrameTransformTask.java | 35 ++++++++++++++----- .../test/data_frame/transforms_start_stop.yml | 8 ++--- .../test/data_frame/transforms_stats.yml | 6 ++-- 5 files changed, 43 insertions(+), 34 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 1bd49154ee548..40cd6f454cdab 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -72,6 +72,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { @@ -264,7 +265,8 @@ public void testStartStop() throws IOException { GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); - assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState()); + IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState(); + assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING))); StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index d0f15197c3cca..5b0c0e7dfc19b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -106,8 +106,6 @@ static List verifyIndicesPrimaryShardsAreActive(ClusterState clusterStat protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTransform params, PersistentTaskState state) { final String transformId = params.getId(); final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; - final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(DataFrameTransformTask.SCHEDULE_NAME + "_" + transformId, - next()); final DataFrameTransformState transformState = (DataFrameTransformState) state; final DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder = @@ -137,7 +135,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTr stats -> { indexerBuilder.setInitialStats(stats); buildTask.initializeIndexer(indexerBuilder); - 
scheduleAndStartTask(buildTask, schedulerJob, startTaskListener); + startTask(buildTask, startTaskListener); }, error -> { if (error instanceof ResourceNotFoundException == false) { @@ -145,7 +143,7 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTr } indexerBuilder.setInitialStats(new DataFrameIndexerTransformStats(transformId)); buildTask.initializeIndexer(indexerBuilder); - scheduleAndStartTask(buildTask, schedulerJob, startTaskListener); + startTask(buildTask, startTaskListener); } ); @@ -218,30 +216,20 @@ private void markAsFailed(DataFrameTransformTask task, String reason) { } } - private void scheduleAndStartTask(DataFrameTransformTask buildTask, - SchedulerEngine.Job schedulerJob, - ActionListener listener) { - // Note that while the task is added to the scheduler here, the internal state will prevent - // it from doing any work until the task is "started" via the StartTransform api - schedulerEngine.register(buildTask); - schedulerEngine.add(schedulerJob); - logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); + private void startTask(DataFrameTransformTask buildTask, + ActionListener listener) { // If we are stopped, and it is an initial run, this means we have never been started, // attempt to start the task if (buildTask.getState().getTaskState().equals(DataFrameTransformTaskState.STOPPED) && buildTask.isInitialRun()) { + logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); buildTask.start(listener); + } else { logger.debug("No need to start task. Its current state is: {}", buildTask.getState().getIndexerState()); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); } } - static SchedulerEngine.Schedule next() { - return (startTime, now) -> { - return now + 1000; // to be fixed, hardcode something - }; - } - @Override protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index bfe0e4f4d77b1..ee8767e2235df 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -208,6 +208,10 @@ public synchronized void start(ActionListener listener) { persistStateToClusterState(state, ActionListener.wrap( task -> { auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); + long now = System.currentTimeMillis(); + // kick off the indexer + triggered(new Event(schedulerJobName(), now, now)); + registerWithSchedulerJob(); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); }, exc -> { @@ -238,7 +242,7 @@ public synchronized void triggered(Event event) { return; } // for now no rerun, so only trigger if checkpoint == 0 - if (currentCheckpoint.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) { + if (currentCheckpoint.get() == 0 && event.getJobName().equals(schedulerJobName())) { logger.debug("Data frame indexer [{}] schedule has triggered, state: [{}]", event.getJobName(), getIndexer().getState()); getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis()); } @@ 
-249,13 +253,7 @@ public synchronized void triggered(Event event) { * This tries to remove the job from the scheduler and completes the persistent task */ synchronized void shutdown() { - try { - schedulerEngine.remove(SCHEDULE_NAME + "_" + transform.getId()); - schedulerEngine.unregister(this); - } catch (Exception e) { - markAsFailed(e); - return; - } + deregisterSchedulerJob(); markAsCompleted(); } @@ -311,6 +309,27 @@ public synchronized void onCancelled() { } } + private void registerWithSchedulerJob() { + schedulerEngine.register(this); + final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(schedulerJobName(), next()); + schedulerEngine.add(schedulerJob); + } + + private void deregisterSchedulerJob() { + schedulerEngine.remove(schedulerJobName()); + schedulerEngine.unregister(this); + } + + private String schedulerJobName() { + return DataFrameTransformTask.SCHEDULE_NAME + "_" + getTransformId(); + } + + private SchedulerEngine.Schedule next() { + return (startTime, now) -> { + return now + 1000; // to be fixed, hardcode something + }; + } + synchronized void initializeIndexer(ClientDataFrameIndexerBuilder indexerBuilder) { indexer.set(indexerBuilder.build(this)); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 1e9223b79f201..8b30fd1186b5b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -100,7 +100,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: @@ -127,7 +127,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } --- @@ -168,7 +168,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: @@ -194,7 +194,7 @@ teardown: transform_id: "airline-transform-start-later" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-later" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 33b0f40863a79..bedeea18a1545 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -47,13 +47,13 @@ teardown: transform_id: "airline-transform-stats" - match: { count: 1 } - match: { transforms.0.id: 
"airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - match: { transforms.0.state.checkpoint: 0 } - match: { transforms.0.stats.pages_processed: 0 } - match: { transforms.0.stats.documents_processed: 0 } - match: { transforms.0.stats.documents_indexed: 0 } - - match: { transforms.0.stats.trigger_count: 0 } + - match: { transforms.0.stats.trigger_count: 1 } - match: { transforms.0.stats.index_time_in_ms: 0 } - match: { transforms.0.stats.index_total: 0 } - match: { transforms.0.stats.index_failures: 0 } @@ -172,7 +172,7 @@ teardown: transform_id: "_all" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.1.id: "airline-transform-stats-dos" } - match: { transforms.1.state.indexer_state: "stopped" } From 9002be4e610aca09392243d664b37f6358f0fcc1 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 20 May 2019 09:06:42 -0400 Subject: [PATCH 129/321] [DOCS] Updates TLS configuration info (#41983) --- .../configuring-tls-docker.asciidoc | 6 ++---- .../securing-elasticsearch.asciidoc | 4 ++-- .../setting-up-ssl.asciidoc | 15 +++++++-------- .../reference/setup/bootstrap-checks-xes.asciidoc | 5 ++--- .../en/security/securing-communications.asciidoc | 3 +-- 5 files changed, 14 insertions(+), 19 deletions(-) diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index 2bc2300174ecc..1d23430e37eec 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -2,10 +2,8 @@ [[configuring-tls-docker]] === Encrypting communications in an {es} Docker Container -Starting with version 6.0.0, {stack} {security-features} -(Gold, Platinum or Enterprise subscriptions) -https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[require SSL/TLS] -encryption for the transport networking layer. +Unless you are using a trial license, {stack} {security-features} require +SSL/TLS encryption for the transport networking layer. This section demonstrates an easy path to get started with SSL/TLS for both HTTPS and transport using the {es} Docker image. The example uses diff --git a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc index 9d207f26a96b6..a24e272dd8937 100644 --- a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc +++ b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc @@ -7,8 +7,8 @@ your {es} cluster. Connections are secured using Transport Layer Security (TLS/SSL). WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you +have a trial license, you must configure SSL/TLS for internode-communication. 
To enable encryption, you need to perform the following steps on each node in the cluster: diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc index 90f9b040d9d54..68eda2cdc3e09 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc @@ -1,16 +1,15 @@ [[ssl-tls]] -=== Setting Up TLS on a cluster +=== Setting up TLS on a cluster -The {stack} {security-features} enables you to encrypt traffic to, from, and +The {stack} {security-features} enable you to encrypt traffic to, from, and within your {es} cluster. Connections are secured using Transport Layer Security (TLS), which is commonly referred to as "SSL". WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you have a trial license, you must configure SSL/TLS for internode-communication. The following steps describe how to enable encryption across the various -components of the Elastic Stack. You must perform each of the steps that are +components of the {stack}. You must perform each of the steps that are applicable to your cluster. . Generate a private key and X.509 certificate for each of your {es} nodes. See @@ -22,14 +21,14 @@ enable TLS on the HTTP layer. See {ref}/configuring-tls.html#tls-transport[Encrypting Communications Between Nodes in a Cluster] and {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. -. Configure {monitoring} to use encrypted connections. See <>. +. Configure the {monitor-features} to use encrypted connections. See <>. . Configure {kib} to encrypt communications between the browser and the {kib} server and to connect to {es} via HTTPS. See -{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. +{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}]. . Configure Logstash to use TLS encryption. See -{logstash-ref}/ls-security.html[Configuring Security in Logstash]. +{logstash-ref}/ls-security.html[Configuring security in {ls}]. . Configure Beats to use encrypted connections. See <>. diff --git a/docs/reference/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc index df020bbd96276..37c90e9f4d9a3 100644 --- a/docs/reference/setup/bootstrap-checks-xes.asciidoc +++ b/docs/reference/setup/bootstrap-checks-xes.asciidoc @@ -53,9 +53,8 @@ must also be valid. === SSL/TLS check //See TLSLicenseBootstrapCheck.java -In 6.0 and later releases, if you have a gold, platinum, or enterprise license -and {es} {security-features} are enabled, you must configure SSL/TLS for -internode-communication. +If you enable {es} {security-features}, unless you have a trial license, you +must configure SSL/TLS for internode-communication. NOTE: Single-node clusters that use a loopback interface do not have this requirement. For more information, see diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index 63fded729eb8c..6672c0316493e 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -5,8 +5,7 @@ Elasticsearch nodes store data that may be confidential. 
Attacks on the data may come from the network. These attacks could include sniffing of the data, manipulation of the data, and attempts to gain access to the server and thus the -files storing the data. Securing your nodes is required in order to use a production -license that enables {security-features} and helps reduce the risk from +files storing the data. Securing your nodes helps reduce the risk from network-based attacks. This section shows how to: From 0635f5a9e24d6b796b87270fe1cabd6dd42ce58f Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 20 May 2019 09:46:37 -0400 Subject: [PATCH 130/321] Fix FiltersAggregation NPE when `filters` is empty (#41459) If `keyedFilters` is null it assumes there are unkeyed filters...which will NPE if the unkeyed filters was actually empty. This refactors to simplify the filter assignment a bit, adds an empty check and tidies up some formatting. --- .../search.aggregation/220_filters_bucket.yml | 12 ++++ .../filter/FiltersAggregationBuilder.java | 61 ++++++++++--------- .../aggregations/bucket/FiltersTests.java | 38 +++++++++++- 3 files changed, 78 insertions(+), 33 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml index a6b7cae104418..60e1b3cb5e4da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml @@ -251,8 +251,20 @@ setup: --- "Bad params": + - skip: + version: " - 7.99.99" # TODO fix version after backport + reason: "empty bodies throws exception starting in 7.2" + - do: + catch: /\[filters\] cannot be empty/ + search: + rest_total_hits_as_int: true + body: + aggs: + the_filter: + filters: {} - do: + catch: /\[filters\] cannot be empty/ search: rest_total_hits_as_int: true body: diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index 810126e851251..54dfc301b2dbc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -40,6 +40,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -47,7 +48,7 @@ import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; public class FiltersAggregationBuilder extends AbstractAggregationBuilder - implements MultiBucketAggregationBuilder { + implements MultiBucketAggregationBuilder { public static final String NAME = "filters"; private static final ParseField FILTERS_FIELD = new ParseField("filters"); @@ -74,7 +75,7 @@ private FiltersAggregationBuilder(String name, List filters, boolea this.filters = new ArrayList<>(filters); if (keyed) { // internally we want to have a fixed order of filters, regardless of the order of the filters in the request - Collections.sort(this.filters, (KeyedFilter kf1, KeyedFilter kf2) -> kf1.key().compareTo(kf2.key())); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); this.keyed = true; } else { this.keyed = false; @@ -220,9 +221,9 @@ protected 
AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) th @Override protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder) - throws IOException { + throws IOException { return new FiltersAggregatorFactory(name, filters, keyed, otherBucket, otherBucketKey, context, parent, - subFactoriesBuilder, metaData); + subFactoriesBuilder, metaData); } @Override @@ -248,15 +249,15 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param } public static FiltersAggregationBuilder parse(String aggregationName, XContentParser parser) - throws IOException { + throws IOException { - List keyedFilters = null; - List nonKeyedFilters = null; + List filters = new ArrayList<>(); - XContentParser.Token token = null; + XContentParser.Token token; String currentFieldName = null; String otherBucketKey = null; Boolean otherBucket = null; + boolean keyed = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -265,61 +266,61 @@ public static FiltersAggregationBuilder parse(String aggregationName, XContentPa otherBucket = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.VALUE_STRING) { if (OTHER_BUCKET_KEY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { otherBucketKey = parser.text(); } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_OBJECT) { if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - keyedFilters = new ArrayList<>(); String key = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { key = parser.currentName(); } else { QueryBuilder filter = parseInnerQueryBuilder(parser); - keyedFilters.add(new FiltersAggregator.KeyedFilter(key, filter)); + filters.add(new FiltersAggregator.KeyedFilter(key, filter)); } } + keyed = true; } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - nonKeyedFilters = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + List builders = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { QueryBuilder filter = parseInnerQueryBuilder(parser); - nonKeyedFilters.add(filter); + builders.add(filter); + } + for (int i = 0; i < builders.size(); i++) { + filters.add(new KeyedFilter(String.valueOf(i), builders.get(i))); } } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + 
currentFieldName + "]."); } } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } + if (filters.isEmpty()) { + throw new IllegalArgumentException("[" + FILTERS_FIELD + "] cannot be empty."); + } + + FiltersAggregationBuilder factory = new FiltersAggregationBuilder(aggregationName, filters, keyed); + if (otherBucket == null && otherBucketKey != null) { // automatically enable the other bucket if a key is set, as per the doc otherBucket = true; } - - FiltersAggregationBuilder factory; - if (keyedFilters != null) { - factory = new FiltersAggregationBuilder(aggregationName, - keyedFilters.toArray(new FiltersAggregator.KeyedFilter[keyedFilters.size()])); - } else { - factory = new FiltersAggregationBuilder(aggregationName, - nonKeyedFilters.toArray(new QueryBuilder[nonKeyedFilters.size()])); - } if (otherBucket != null) { factory.otherBucket(otherBucket); } @@ -338,9 +339,9 @@ protected int doHashCode() { protected boolean doEquals(Object obj) { FiltersAggregationBuilder other = (FiltersAggregationBuilder) obj; return Objects.equals(filters, other.filters) - && Objects.equals(keyed, other.keyed) - && Objects.equals(otherBucket, other.otherBucket) - && Objects.equals(otherBucketKey, other.otherBucketKey); + && Objects.equals(keyed, other.keyed) + && Objects.equals(otherBucket, other.otherBucket) + && Objects.equals(otherBucketKey, other.otherBucketKey); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 4c7fdccb64b00..aa1ff6f55af82 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -92,7 +92,9 @@ public void testFiltersSortedByKey() { public void testOtherBucket() throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); - builder.startArray("filters").endArray(); + builder.startArray("filters") + .startObject().startObject("term").field("field", "foo").endObject().endObject() + .endArray(); builder.endObject(); try (XContentParser parser = createParser(shuffleXContent(builder))) { parser.nextToken(); @@ -102,7 +104,9 @@ public void testOtherBucket() throws IOException { builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); - builder.startArray("filters").endArray(); + builder.startArray("filters") + .startObject().startObject("term").field("field", "foo").endObject().endObject() + .endArray(); builder.field("other_bucket_key", "some_key"); builder.endObject(); } @@ -114,7 +118,9 @@ public void testOtherBucket() throws IOException { builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); - builder.startArray("filters").endArray(); + builder.startArray("filters") + .startObject().startObject("term").field("field", "foo").endObject().endObject() + .endArray(); builder.field("other_bucket", false); builder.field("other_bucket_key", "some_key"); builder.endObject(); @@ -192,4 +198,30 @@ public void testRewritePreservesOtherBucket() throws IOException { assertEquals(originalFilters.otherBucket(), rewrittenFilters.otherBucket()); 
assertEquals(originalFilters.otherBucketKey(), rewrittenFilters.otherBucketKey()); } + + public void testEmptyFilters() throws IOException { + { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.startObject(); + builder.startArray("filters").endArray(); // unkeyed array + builder.endObject(); + XContentParser parser = createParser(shuffleXContent(builder)); + parser.nextToken(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> FiltersAggregationBuilder.parse("agg_name", parser)); + assertThat(e.getMessage(), equalTo("[filters] cannot be empty.")); + } + + { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.startObject(); + builder.startObject("filters").endObject(); // keyed object + builder.endObject(); + XContentParser parser = createParser(shuffleXContent(builder)); + parser.nextToken(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> FiltersAggregationBuilder.parse("agg_name", parser)); + assertThat(e.getMessage(), equalTo("[filters] cannot be empty.")); + } + } } From e9cb0dbe52006959a99f8d45cfbbc8b774b06ade Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 20 May 2019 09:55:35 -0400 Subject: [PATCH 131/321] Fix max boundary for rollup jobs that use a delay (#42158) Rollup jobs can define how long they should wait before rolling up new documents. However, if the delay is smaller or if it's not a multiple of the rollup interval, the job can create incomplete buckets, because the max boundary for a job is computed from the time when the job started, rounded to the interval, minus the delay. This change fixes this computation by applying the delay subtraction before the rounding in order to ensure that we never create a boundary that falls in the middle of a bucket. --- .../xpack/rollup/job/RollupIndexer.java | 13 ++--- .../job/RollupIndexerIndexingTests.java | 52 +++++++++++++++++++ 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 4eead794e9439..558242dfb7fa4 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -97,15 +97,12 @@ protected String getJobId() { @Override protected void onStart(long now, ActionListener listener) { try { - // this is needed to exclude buckets that can still receive new documents. + // this is needed to exclude buckets that can still receive new documents DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } + // if the job has a delay we filter all documents that appear before it + long delay = dateHisto.getDelay() != null ? 
+ TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis() : 0; + maxBoundary = dateHisto.createRounding().round(now - delay); listener.onResponse(null); } catch (Exception e) { listener.onFailure(e); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 7346cf6f855e1..1fe01a8246267 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -326,6 +326,58 @@ public void testSimpleDateHistoWithDelay() throws Exception { }); } + public void testSimpleDateHistoWithOverlappingDelay() throws Exception { + String rollupIndex = randomAlphaOfLengthBetween(5, 10); + String field = "the_histo"; + DateHistogramGroupConfig dateHistoConfig = + new FixedInterval(field, new DateHistogramInterval("1h"), new DateHistogramInterval("15m"), null); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); + final List> dataset = new ArrayList<>(); + long now = asLong("2015-04-01T10:30:00.000Z"); + dataset.addAll( + Arrays.asList( + asMap("the_histo", now - TimeValue.timeValueMinutes(135).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(120).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(105).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(90).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(75).getMillis()), + asMap("the_histo", now - TimeValue.timeValueHours(1).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(45).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(30).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(15).getMillis()), + asMap("the_histo", now) + ) + ); + final Rounding rounding = dateHistoConfig.createRounding(); + executeTestCase(dataset, job, now, (resp) -> { + assertThat(resp.size(), equalTo(2)); + IndexRequest request = resp.get(0); + assertThat(request.index(), equalTo(rollupIndex)); + assertThat(request.sourceAsMap(), equalTo( + asMap( + "_rollup.version", 2, + "the_histo.date_histogram.timestamp", rounding.round(now - TimeValue.timeValueHours(2).getMillis()), + "the_histo.date_histogram.interval", "1h", + "the_histo.date_histogram._count", 3, + "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), + "_rollup.id", job.getId() + ) + )); + request = resp.get(1); + assertThat(request.index(), equalTo(rollupIndex)); + assertThat(request.sourceAsMap(), equalTo( + asMap( + "_rollup.version", 2, + "the_histo.date_histogram.timestamp", rounding.round(now - TimeValue.timeValueHours(1).getMillis()), + "the_histo.date_histogram.interval", "1h", + "the_histo.date_histogram._count", 4, + "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), + "_rollup.id", job.getId() + ) + )); + }); + } + public void testSimpleDateHistoWithTimeZone() throws Exception { final List> dataset = new ArrayList<>(); long now = asLong("2015-04-01T10:00:00.000Z"); From 3b79300f7bbfb7de68f02d8715daefe1a7fa1ad6 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 20 May 2019 10:08:51 -0400 Subject: [PATCH 132/321] Update skip version after backport --- .../test/search.aggregation/220_filters_bucket.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml index 60e1b3cb5e4da..e0183f0c54f66 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml @@ -252,7 +252,7 @@ setup: --- "Bad params": - skip: - version: " - 7.99.99" # TODO fix version after backport + version: " - 7.1.99" reason: "empty bodies throws exception starting in 7.2" - do: catch: /\[filters\] cannot be empty/ From 40280bfe17b80b3232bfe88b31182bec432e77ad Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 20 May 2019 10:17:08 -0400 Subject: [PATCH 133/321] Update to joda time 2.10.2 (#42199) --- buildSrc/version.properties | 2 +- server/licenses/joda-time-2.10.1.jar.sha1 | 1 - server/licenses/joda-time-2.10.2.jar.sha1 | 1 + .../java/org/elasticsearch/common/time/DateUtilsTests.java | 3 ++- .../plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 | 1 - .../plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 | 1 + 6 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 server/licenses/joda-time-2.10.1.jar.sha1 create mode 100644 server/licenses/joda-time-2.10.2.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8f7911574979d..471cb3a705cf5 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ slf4j = 1.6.2 jna = 4.5.1 netty = 4.1.35.Final -joda = 2.10.1 +joda = 2.10.2 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) diff --git a/server/licenses/joda-time-2.10.1.jar.sha1 b/server/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ecee..0000000000000 --- a/server/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/server/licenses/joda-time-2.10.2.jar.sha1 b/server/licenses/joda-time-2.10.2.jar.sha1 new file mode 100644 index 0000000000000..9cbac57161c8e --- /dev/null +++ b/server/licenses/joda-time-2.10.2.jar.sha1 @@ -0,0 +1 @@ +a079fc39ccc3de02acdeb7117443e5d9bd431687 \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java index 2b125127f66d3..4ef095da049ec 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java @@ -45,7 +45,8 @@ public class DateUtilsTests extends ESTestCase { private static final Set IGNORE = new HashSet<>(Arrays.asList( - "Eire", "Europe/Dublin" // dublin timezone in joda does not account for DST + "Eire", "Europe/Dublin", // dublin timezone in joda does not account for DST + "Asia/Qostanay" // this has been added in joda 2.10.2 but is not part of the JDK 12.0.1 tzdata yet )); public void testTimezoneIds() { diff --git a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ecee..0000000000000 --- 
a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 new file mode 100644 index 0000000000000..9cbac57161c8e --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 @@ -0,0 +1 @@ +a079fc39ccc3de02acdeb7117443e5d9bd431687 \ No newline at end of file From 77b895fb7126f4deee56e73134f9727de4cffd80 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 20 May 2019 11:52:38 -0400 Subject: [PATCH 134/321] [ML] Fix logger declaration in ML plugins (#42222) This corrects what appears to have been a copy-paste error where the logger for `MachineLearning` and `DataFrame` was wrongly set to be that of `XPackPlugin`. --- .../java/org/elasticsearch/xpack/dataframe/DataFrame.java | 8 +------- .../java/org/elasticsearch/xpack/ml/MachineLearning.java | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index b7e6c235f8e6c..34343e5fe8820 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -76,10 +76,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.function.Supplier; import java.util.function.UnaryOperator; @@ -90,11 +88,7 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu public static final String NAME = "data_frame"; public static final String TASK_THREAD_POOL_NAME = "data_frame_indexing"; - // list of headers that will be stored when a transform is created - public static final Set HEADER_FILTERS = new HashSet<>( - Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); - - private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(DataFrame.class); private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 714b2712367a4..de945b9bc6c3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -300,7 +300,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting MIN_DISK_SPACE_OFF_HEAP = Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Setting.Property.NodeScope); - private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(MachineLearning.class); private final Settings settings; private final Environment env; From 48b55e95d0fe74613297a8ed13a6ba1cdaf115ec Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 20 May 2019 12:24:21 -0400 Subject: [PATCH 135/321] Update api links per context (#42033) This manually updates the API 
links contained in each context doc to correctly point to both the shared API and specialized API if available. This is a temporary fix until full automation is completed for generating context documentation. --- .../painless-analysis-predicate-context.asciidoc | 2 +- .../painless-bucket-script-agg-context.asciidoc | 2 +- .../painless-bucket-selector-agg-context.asciidoc | 4 ++++ .../painless-contexts/painless-field-context.asciidoc | 3 ++- .../painless-contexts/painless-filter-context.asciidoc | 2 +- .../painless-ingest-processor-context.asciidoc | 3 ++- .../painless-metric-agg-combine-context.asciidoc | 2 +- .../painless-metric-agg-init-context.asciidoc | 2 +- .../painless-metric-agg-map-context.asciidoc | 2 +- .../painless-metric-agg-reduce-context.asciidoc | 2 +- .../painless-min-should-match-context.asciidoc | 2 +- .../painless-contexts/painless-reindex-context.asciidoc | 2 +- .../painless-contexts/painless-score-context.asciidoc | 3 ++- .../painless-contexts/painless-similarity-context.asciidoc | 2 +- .../painless/painless-contexts/painless-sort-context.asciidoc | 2 +- .../painless-update-by-query-context.asciidoc | 2 +- .../painless-contexts/painless-update-context.asciidoc | 2 +- .../painless-watcher-condition-context.asciidoc | 2 +- .../painless-watcher-context-variables.asciidoc | 3 +-- .../painless-watcher-transform-context.asciidoc | 2 +- .../painless-contexts/painless-weight-context.asciidoc | 2 +- 21 files changed, 27 insertions(+), 21 deletions(-) diff --git a/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc b/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc index 07914b671e781..3edb1080611d2 100644 --- a/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc +++ b/docs/painless/painless-contexts/painless-analysis-predicate-context.asciidoc @@ -40,4 +40,4 @@ analysis chain matches a predicate. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc index 5a5306016945d..f6e2a4b7a5a91 100644 --- a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc @@ -18,7 +18,7 @@ numeric:: ==== API -The standard <> is available. +The standard <> is available. ==== Example diff --git a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc index 69fbce1d0828f..2d854c880cdcd 100644 --- a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc @@ -19,6 +19,10 @@ boolean:: ==== API +The standard <> is available. + +==== Example + To run this example, first follow the steps in <>. diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc index 15a9f4255232c..5a95e88c68460 100644 --- a/docs/painless/painless-contexts/painless-field-context.asciidoc +++ b/docs/painless/painless-contexts/painless-field-context.asciidoc @@ -25,7 +25,8 @@ a customized value for each document in the results of a query. *API* -The standard <> is available. +Both the standard <> and +<> are available. 
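To make the field context just described more tangible, a script field can be attached
to a search from Java roughly as below. This is a hedged sketch: the index name, field
name, and script source are invented, and it assumes the standard client-side
SearchSourceBuilder and Script classes rather than anything changed by this patch.

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptType;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    import java.util.Collections;

    public class ScriptFieldSketch {
        public static SearchRequest build() {
            // "price_with_tax" is an invented script field; its script runs once per
            // hit in the field context and reads doc values.
            SearchSourceBuilder source = new SearchSourceBuilder()
                .scriptField("price_with_tax",
                    new Script(ScriptType.INLINE, "painless",
                        "doc['price'].value * 1.19", Collections.emptyMap()));
            return new SearchRequest("my-index").source(source);
        }
    }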
*Example* diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc index bf4741cfc02fc..eea810f616291 100644 --- a/docs/painless/painless-contexts/painless-filter-context.asciidoc +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -23,7 +23,7 @@ query to include and exclude documents. *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc index 546057ab1a0b8..858949deb5602 100644 --- a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc +++ b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc @@ -38,7 +38,8 @@ void:: *API* -The standard <> is available. +Both the standard <> and +<> are available. *Example* diff --git a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc index 5cc9ad8ecbb93..2d5edf6ab4cd8 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc @@ -24,4 +24,4 @@ optional as part of a full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc index 8c0fddfa33961..78ebac79c65ee 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc @@ -29,4 +29,4 @@ full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc index a34308aa93887..485d4da8439d8 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc @@ -44,4 +44,4 @@ part of a full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc index b492207ef4468..ba6b6dabdc924 100644 --- a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc +++ b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc @@ -25,4 +25,4 @@ specified) and is optional as part of a full metric aggregation. *API* -The standard <> is available. +The standard <> is available. diff --git a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc index cd476481381a6..896e882c7837d 100644 --- a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc +++ b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc @@ -25,7 +25,7 @@ results. *API* -The standard <> is available. +The standard <> is available. 
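The min-should-match context just above is reached through the terms_set query. The
following sketch is illustrative only: the field and terms are invented, it assumes the
TermsSetQueryBuilder#setMinimumShouldMatchScript setter, and params.num_terms is the
count of supplied terms that the query makes available to the script.

    import org.elasticsearch.index.query.TermsSetQueryBuilder;
    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptType;

    import java.util.Arrays;
    import java.util.Collections;

    public class MinShouldMatchSketch {
        public static TermsSetQueryBuilder build() {
            // Require roughly 75% of the supplied terms to match per document.
            return new TermsSetQueryBuilder("codes", Arrays.asList("abc", "def", "ghi"))
                .setMinimumShouldMatchScript(new Script(ScriptType.INLINE, "painless",
                    "(int) Math.ceil(params.num_terms * 0.75)", Collections.emptyMap()));
        }
    }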
*Example* diff --git a/docs/painless/painless-contexts/painless-reindex-context.asciidoc b/docs/painless/painless-contexts/painless-reindex-context.asciidoc index ae5445183a6ad..54791f2fa50db 100644 --- a/docs/painless/painless-contexts/painless-reindex-context.asciidoc +++ b/docs/painless/painless-contexts/painless-reindex-context.asciidoc @@ -65,4 +65,4 @@ reindexed into a target index. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-score-context.asciidoc b/docs/painless/painless-contexts/painless-score-context.asciidoc index 2bec9021c1720..e5d3c538b4512 100644 --- a/docs/painless/painless-contexts/painless-score-context.asciidoc +++ b/docs/painless/painless-contexts/painless-score-context.asciidoc @@ -26,7 +26,8 @@ score to documents returned from a query. *API* -The standard <> is available. +Both the standard <> and +<> are available. *Example* diff --git a/docs/painless/painless-contexts/painless-similarity-context.asciidoc b/docs/painless/painless-contexts/painless-similarity-context.asciidoc index 98eff19a1943e..e48da21195dd7 100644 --- a/docs/painless/painless-contexts/painless-similarity-context.asciidoc +++ b/docs/painless/painless-contexts/painless-similarity-context.asciidoc @@ -56,4 +56,4 @@ uses synonyms. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc index 64c17ad07a664..4a7743dc48800 100644 --- a/docs/painless/painless-contexts/painless-sort-context.asciidoc +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -25,7 +25,7 @@ Use a Painless script to *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc index ba42105f2e901..c9e72ac5b9288 100644 --- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc @@ -51,7 +51,7 @@ result of query. *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc index 6ed8c2f7c13a3..a83bf47de1f78 100644 --- a/docs/painless/painless-contexts/painless-update-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-context.asciidoc @@ -52,7 +52,7 @@ add, modify, or delete fields within a single document. *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc index 91ab51561ef88..8e4924d426b0c 100644 --- a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -14,7 +14,7 @@ include::painless-watcher-context-variables.asciidoc[] *API* -The standard <> is available. +The standard <> is available. 
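Of the contexts touched in this patch, the update context (a few hunks above) is the
simplest to drive from Java. A minimal sketch follows; the index, document id, script
source, and parameter are all invented for illustration.

    import org.elasticsearch.action.update.UpdateRequest;
    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptType;

    import java.util.Collections;

    public class UpdateContextSketch {
        public static UpdateRequest build() {
            // The script runs in the update context, where ctx._source is mutable.
            return new UpdateRequest("my-index", "1")
                .script(new Script(ScriptType.INLINE, "painless",
                    "ctx._source.counter += params.delta",
                    Collections.singletonMap("delta", 4)));
        }
    }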
*Example* diff --git a/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc index addfd11cab92e..71009d819a42d 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc @@ -33,8 +33,7 @@ The following variables are available in all watcher contexts. *API* - -The standard <> is available. +The standard <> is available. To run this example, first follow the steps in <>. diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc index 92012720aa69e..ec0ac6519a44f 100644 --- a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -14,7 +14,7 @@ include::painless-watcher-context-variables.asciidoc[] *API* -The standard <> is available. +The standard <> is available. *Example* diff --git a/docs/painless/painless-contexts/painless-weight-context.asciidoc b/docs/painless/painless-contexts/painless-weight-context.asciidoc index 9b4a47bc113b4..44438a1225ea6 100644 --- a/docs/painless/painless-contexts/painless-weight-context.asciidoc +++ b/docs/painless/painless-contexts/painless-weight-context.asciidoc @@ -39,4 +39,4 @@ Queries that contain multiple terms calculate a separate weight for each term. *API* -The standard <> is available. +The standard <> is available. From fdcbf056c60d31ebf23af0450aa5f21eb522a1ba Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 20 May 2019 18:33:44 +0200 Subject: [PATCH 136/321] Cleanup Various Uses of ActionListener (#40126) * Cleanup Various Uses of ActionListener * Use shorter `map`, `runAfter` or `wrap` where functionally equivalent to anonymous class * Use ActionRunnable where functionally equivalent --- .../tasks/get/TransportGetTaskAction.java | 28 +++----- .../TransportSnapshotsStatusAction.java | 22 ++----- .../upgrade/post/TransportUpgradeAction.java | 25 ++----- .../action/bulk/BulkRequestHandler.java | 21 ++---- .../ingest/PutPipelineTransportAction.java | 25 ++----- .../support/ThreadedActionListener.java | 10 +-- .../broadcast/TransportBroadcastAction.java | 42 ++++-------- ...ransportInstanceSingleOperationAction.java | 29 +++----- .../shard/TransportSingleShardAction.java | 9 +-- .../support/tasks/TransportTasksAction.java | 18 +---- .../elasticsearch/search/SearchService.java | 48 ++------------ .../transport/TransportKeepAlive.java | 5 +- .../action/RejectionActionIT.java | 6 +- .../node/tasks/TransportTasksActionTests.java | 12 +--- .../search/ClearScrollControllerTests.java | 66 ++++++------------- .../TransportActionFilterChainTests.java | 11 ++-- .../TransportWriteActionTests.java | 12 +--- .../decider/EnableAssignmentDeciderIT.java | 14 +--- 18 files changed, 96 insertions(+), 307 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index fe07a4efe930e..d1d72da544560 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -203,27 +203,15 @@ void getFinishedTaskFromIndex(Task 
thisTask, GetTaskRequest request, ActionListe request.getTaskId().toString()); get.setParentTask(clusterService.localNode().getId(), thisTask.getId()); - client.get(get, new ActionListener() { - @Override - public void onResponse(GetResponse getResponse) { - try { - onGetFinishedTaskFromIndex(getResponse, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { - // We haven't yet created the index for the task results so it can't be found. - listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, - request.getTaskId())); - } else { - listener.onFailure(e); - } + client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> { + if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { + // We haven't yet created the index for the task results so it can't be found. + listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, + request.getTaskId())); + } else { + listener.onFailure(e); } - }); + })); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 5dfc24d1e280e..c2f0d3dd0c074 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -119,23 +119,11 @@ protected void masterOperation(final SnapshotsStatusRequest request, TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()])) .snapshots(snapshots).timeout(request.masterNodeTimeout()); - transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener() { - @Override - public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) { - try { - List currentSnapshots = - snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())); - listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses)); - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + transportNodesSnapshotsStatus.execute(nodesRequest, + ActionListener.map( + listener, nodeSnapshotStatuses -> + buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())), + nodeSnapshotStatuses))); } else { // We don't have any in-progress shards, just return current stats listener.onResponse(buildResponse(request, currentSnapshots, null)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index f2d046f3321b2..b122350c3e61d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -184,26 +184,13 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, UpgradeReq @Override 
protected void doExecute(Task task, UpgradeRequest request, final ActionListener listener) { - ActionListener settingsUpdateListener = new ActionListener() { - @Override - public void onResponse(UpgradeResponse upgradeResponse) { - try { - if (upgradeResponse.versions().isEmpty()) { - listener.onResponse(upgradeResponse); - } else { - updateSettings(upgradeResponse, listener); - } - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + super.doExecute(task, request, ActionListener.wrap(upgradeResponse -> { + if (upgradeResponse.versions().isEmpty()) { + listener.onResponse(upgradeResponse); + } else { + updateSettings(upgradeResponse, listener); } - }; - super.doExecute(task, request, settingsUpdateListener); + }, listener::onFailure)); } private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index 2f5db520088e9..7890fb4e83fc1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -59,27 +59,20 @@ public void execute(BulkRequest bulkRequest, long executionId) { semaphore.acquire(); toRelease = semaphore::release; CountDownLatch latch = new CountDownLatch(1); - retry.withBackoff(consumer, bulkRequest, new ActionListener() { + retry.withBackoff(consumer, bulkRequest, ActionListener.runAfter(new ActionListener() { @Override public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - latch.countDown(); - } + listener.afterBulk(executionId, bulkRequest, response); } @Override public void onFailure(Exception e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - latch.countDown(); - } + listener.afterBulk(executionId, bulkRequest, e); } - }); + }, () -> { + semaphore.release(); + latch.countDown(); + })); bulkRequestSetupSuccessful = true; if (concurrentRequests == 0) { latch.await(); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index 97f13bf71d14c..be1528a354bc3 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -74,25 +73,13 @@ protected void masterOperation(PutPipelineRequest request, ClusterState state, A NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear(); nodesInfoRequest.ingest(true); - client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener() { - @Override - public void onResponse(NodesInfoResponse nodeInfos) { - try { - Map ingestInfos = new 
HashMap<>(); - for (NodeInfo nodeInfo : nodeInfos.getNodes()) { - ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); - } - ingestService.putPipeline(ingestInfos, request, listener); - } catch (Exception e) { - onFailure(e); - } + client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> { + Map ingestInfos = new HashMap<>(); + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { + ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + ingestService.putPipeline(ingestInfos, request, listener); + }, listener::onFailure)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index dfcf6445abf7d..ad72ef10139ba 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; @@ -86,21 +87,16 @@ public ThreadedActionListener(Logger logger, ThreadPool threadPool, String execu @Override public void onResponse(final Response response) { - threadPool.executor(executor).execute(new AbstractRunnable() { + threadPool.executor(executor).execute(new ActionRunnable<>(listener) { @Override public boolean isForceExecution() { return forceExecution; } @Override - protected void doRun() throws Exception { + protected void doRun() { listener.onResponse(response); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 87c9e15324152..15daaf786b604 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -36,7 +37,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -287,45 +287,25 @@ class ShardTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception { - asyncShardOperation(request, task, new ActionListener() { - @Override - public void onResponse(ShardResponse response) { - try { - channel.sendResponse(response); - 
} catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn(() -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + asyncShardOperation(request, task, + ActionListener.wrap(channel::sendResponse, e -> { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + } } - } - }); + )); } } protected void asyncShardOperation(ShardRequest request, Task task, ActionListener listener) { - transportService.getThreadPool().executor(getExecutor(request)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + transportService.getThreadPool().executor(shardExecutor).execute(new ActionRunnable(listener) { @Override protected void doRun() throws Exception { listener.onResponse(shardOperation(request, task)); } }); } - - protected String getExecutor(ShardRequest request) { - return shardExecutor; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index c575c3b233872..d1d7b6ffac597 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -254,27 +254,16 @@ private class ShardTransportHandler implements TransportRequestHandler @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { - shardOperation(request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn("failed to send response for get", inner); + shardOperation(request, + ActionListener.wrap(channel::sendResponse, e -> { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn("failed to send response for get", inner); + } } - } - }); - + )); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 3c2e7f9a49e0d..123ed11769aa7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChannelActionListener; @@ -40,7 +41,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -107,12 +107,7 @@ protected void doExecute(Task task, Request request, ActionListener li protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException; protected void asyncShardOperation(Request request, ShardId shardId, ActionListener listener) throws IOException { - threadPool.executor(getExecutor(request, shardId)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + threadPool.executor(getExecutor(request, shardId)).execute(new ActionRunnable<>(listener) { @Override protected void doRun() throws Exception { listener.onResponse(shardOperation(request, shardId)); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index c2f9872ca5cee..8d80a15beb14b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -329,19 +329,8 @@ class NodeTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception { - nodeOperation(request, new ActionListener() { - @Override - public void onResponse( - TransportTasksAction.NodeTasksResponse response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { + nodeOperation(request, ActionListener.wrap(channel::sendResponse, + e -> { try { channel.sendResponse(e); } catch (IOException e1) { @@ -349,11 +338,10 @@ public void onFailure(Exception e) { logger.warn("Failed to send failure", e1); } } - }); + )); } } - private class NodeTaskRequest extends TransportRequest { private TasksRequest tasksRequest; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 8cf3138212f7f..b703493b4d505 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; @@ -39,7 +40,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.core.internal.io.IOUtils; @@ -302,21 +302,7 @@ protected void doClose() { } public void executeDfsPhase(ShardSearchRequest request, SearchTask task, ActionListener listener) { - rewriteShardRequest(request, new ActionListener() { - 
@Override - public void onResponse(ShardSearchRequest request) { - try { - listener.onResponse(executeDfsPhase(request, task)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + rewriteShardRequest(request, ActionListener.map(listener, r -> executeDfsPhase(r, task))); } private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchTask task) throws IOException { @@ -351,30 +337,11 @@ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final Sea } public void executeQueryPhase(ShardSearchRequest request, SearchTask task, ActionListener listener) { - rewriteShardRequest(request, new ActionListener() { - @Override - public void onResponse(ShardSearchRequest request) { - try { - listener.onResponse(executeQueryPhase(request, task)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + rewriteShardRequest(request, ActionListener.map(listener, r -> executeQueryPhase(r, task))); } private void runAsync(long id, Supplier executable, ActionListener listener) { - getExecutor(id).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + getExecutor(id).execute(new ActionRunnable(listener) { @Override protected void doRun() { listener.onResponse(executable.get()); @@ -1058,12 +1025,7 @@ private void rewriteShardRequest(ShardSearchRequest request, ActionListener actionListener = ActionListener.wrap(r -> // now we need to check if there is a pending refresh and register shard.awaitShardSearchActive(b -> - executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + executor.execute(new ActionRunnable(listener) { @Override protected void doRun() { listener.onResponse(request); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java index fc7ebe4b9644e..9e49d06f2b0b6 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java @@ -84,10 +84,7 @@ void registerNodeConnection(List nodeChannels, ConnectionProfile con for (TcpChannel channel : nodeChannels) { scheduledPing.addChannel(channel); - - channel.addCloseListener(ActionListener.wrap(() -> { - scheduledPing.removeChannel(channel); - })); + channel.addCloseListener(ActionListener.wrap(() -> scheduledPing.removeChannel(channel))); } } diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java index ad2447cb7b3d0..e0ef29bf7f49e 100644 --- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -65,19 +65,17 @@ public void testSimulatedSearchRejectionLoad() throws Throwable { client().prepareSearch("test") .setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(QueryBuilders.matchQuery("field", "1")) - .execute(new ActionListener() { + .execute(new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(SearchResponse searchResponse) { responses.add(searchResponse); - latch.countDown(); } @Override public void onFailure(Exception e) { responses.add(e); - latch.countDown(); } - }); + }, latch)); } latch.await(); diff 
--git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index e9b940df3847d..b883d593352c2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -470,17 +470,7 @@ public void testCancellingTasksThatDontSupportCancellation() throws Exception { connectNodes(testNodes); CountDownLatch checkLatch = new CountDownLatch(1); CountDownLatch responseLatch = new CountDownLatch(1); - Task task = startBlockingTestNodesAction(checkLatch, new ActionListener() { - @Override - public void onResponse(NodesResponse nodeResponses) { - responseLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - responseLatch.countDown(); - } - }); + Task task = startBlockingTestNodesAction(checkLatch, ActionListener.wrap(responseLatch::countDown)); String actionName = "internal:testAction"; // only pick the main action // Try to cancel main task using action name diff --git a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java index 55c39f735ce31..bcb4a1200b7e8 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -42,32 +43,24 @@ public class ClearScrollControllerTests extends ESTestCase { - public void testClearAll() throws IOException, InterruptedException { + public void testClearAll() throws InterruptedException { DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(3, clearScrollResponse.getNumFreed()); - assertTrue(clearScrollResponse.isSucceeded()); - } finally { - latch.countDown(); - } + assertEquals(3, clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override @@ -112,27 +105,18 @@ public void testClearScrollIds() throws IOException, InterruptedException { String scrollId = TransportSearchHelper.buildScrollId(array); DiscoveryNodes nodes = 
DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); - assertTrue(clearScrollResponse.isSucceeded()); - } finally { - latch.countDown(); - } - + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { @@ -185,32 +169,22 @@ public void testClearScrollIdsWithFailure() throws IOException, InterruptedExcep DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); - if (numFailures.get() > 0) { - assertFalse(clearScrollResponse.isSucceeded()); - } else { - assertTrue(clearScrollResponse.isSucceeded()); - } - - } finally { - latch.countDown(); + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + if (numFailures.get() > 0) { + assertFalse(clearScrollResponse.isSucceeded()); + } else { + assertTrue(clearScrollResponse.isSucceeded()); } - } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index f222bcc015c62..96d057f50c4f7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; @@ -65,7 +66,7 @@ public void shutdown() throws Exception { terminate(threadPool); } - public void testActionFiltersRequest() throws ExecutionException, InterruptedException { + public void testActionFiltersRequest() throws InterruptedException { int numFilters = randomInt(10); Set orders = new HashSet<>(numFilters); while (orders.size() < numFilters) { @@ -139,7 +140,7 @@ protected void doExecute(Task task, TestRequest request, ActionListener failures = new CopyOnWriteArrayList<>(); - transportAction.execute(new TestRequest(), new ActionListener() { + transportAction.execute(new TestRequest(), new 
LatchedActionListener<>(new ActionListener() { @Override public void onResponse(TestResponse testResponse) { responses.incrementAndGet(); - latch.countDown(); } @Override public void onFailure(Exception e) { failures.add(e); - latch.countDown(); } - }); + }, latch)); if (!latch.await(10, TimeUnit.SECONDS)) { fail("timeout waiting for the filter to notify the listener as many times as expected"); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 1a7e5a73e7523..57b30d3484bc9 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -369,17 +369,7 @@ public void testConcurrentWriteReplicaResultCompletion() throws InterruptedExcep CountDownLatch completionLatch = new CountDownLatch(1); threadPool.generic().execute(() -> { waitForBarrier.run(); - replicaResult.respond(new ActionListener() { - @Override - public void onResponse(TransportResponse.Empty empty) { - completionLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - completionLatch.countDown(); - } - }); + replicaResult.respond(ActionListener.wrap(completionLatch::countDown)); }); if (randomBoolean()) { threadPool.generic().execute(() -> { diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index aeb4d9b3a9bfb..2ea6567c9f8d0 100644 --- a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -23,9 +23,7 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.persistent.TestPersistentTasksPlugin; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; @@ -72,17 +70,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { for (int i = 0; i < numberOfTasks; i++) { PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); service.sendStartRequest("task_" + i, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)), - new ActionListener>() { - @Override - public void onResponse(PersistentTask task) { - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - latch.countDown(); - } - }); + ActionListener.wrap(latch::countDown)); } latch.await(); From da5abe27c3e970dc22d6ce3a1befe0ffce3675e6 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 20 May 2019 11:35:31 -0700 Subject: [PATCH 137/321] Validate non-secure settings are not in keystore (#42209) Secure settings currently error if they exist inside elasticsearch.yml. This commit adds validation that non-secure settings do not exist inside the keystore. 
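Since PATCH 136 above leans heavily on a few ActionListener helpers, a compact
before/after sketch may help readers skimming its diffs. The helper methods (wrap, map,
runAfter) are exactly the ones used in that patch; the response types here are invented.

    import org.elasticsearch.action.ActionListener;

    import java.util.concurrent.CountDownLatch;

    public class ListenerSketch {
        // Before: a full anonymous class just to count down a latch on either outcome.
        static ActionListener<String> verbose(CountDownLatch latch) {
            return new ActionListener<String>() {
                @Override
                public void onResponse(String s) {
                    latch.countDown();
                }

                @Override
                public void onFailure(Exception e) {
                    latch.countDown();
                }
            };
        }

        // After: the single-argument wrap() runs the same code on both paths.
        static ActionListener<String> concise(CountDownLatch latch) {
            return ActionListener.wrap(latch::countDown);
        }

        // map() adapts a listener of one type to another, as in executeQueryPhase above.
        static ActionListener<Integer> adapt(ActionListener<String> delegate) {
            return ActionListener.map(delegate, (Integer i) -> Integer.toString(i));
        }

        // runAfter() decorates a listener with a cleanup action on both paths,
        // as in the BulkRequestHandler change above.
        static ActionListener<String> withCleanup(ActionListener<String> delegate, Runnable cleanup) {
            return ActionListener.runAfter(delegate, cleanup);
        }
    }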
closes #41831 --- .../java/org/elasticsearch/common/settings/Setting.java | 5 +++++ .../org/elasticsearch/common/settings/SettingTests.java | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 514cfd3ce4ca8..1e5079124c345 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -466,6 +466,11 @@ public final String getRaw(final Settings settings) { * @return the raw string representation of the setting value */ String innerGetRaw(final Settings settings) { + SecureSettings secureSettings = settings.getSecureSettings(); + if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) { + throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" + + " and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore"); + } return settings.get(getKey(), defaultValue.apply(settings)); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 220392a952c29..b2f73db90f722 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -964,4 +964,13 @@ public void testAffixMapUpdateWithNullSettingValue() { assertEquals("", value); } + public void testNonSecureSettingInKeystore() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("foo", "bar"); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + Setting setting = Setting.simpleString("foo", Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings)); + assertThat(e.getMessage(), containsString("must be stored inside elasticsearch.yml")); + } + } From 570e8edc1e25f1c5197307f40fd4f73e0950620f Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 20 May 2019 21:08:14 -0400 Subject: [PATCH 138/321] Mute date_histo interval bwc test AwaitsFix https://github.com/elastic/elasticsearch/issues/42258 --- .../rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index bce9c25c08c03..8347e09e0c17d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -48,8 +48,8 @@ --- "Put job and datafeed with aggs in old cluster - pre-deprecated interval": - skip: - version: "8.0.0 - " #TODO change this after backport - reason: calendar_interval introduced in 7.1.0 + version: "all" #TODO change this after backport + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258; calendar_interval introduced in 7.2.0" - do: ml.put_job: From 749135b37c9c317554647bb0a63f4e04281f838d Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 21 May 2019 07:52:01 +0100 Subject: [PATCH 139/321] Prevent in-place downgrades and invalid upgrades (#41731) 
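As an aside on PATCH 137 above: the new validation can be exercised end to end by
mirroring its added test, as in the sketch below. MockSecureSettings is the test-only
stand-in for the real keystore used in that test; with the patch applied, reading a
non-secure setting supplied via secure settings fails fast. (The body of PATCH 139
continues after this sketch.)

    import org.elasticsearch.common.settings.MockSecureSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;

    public class KeystoreValidationSketch {
        public static void main(String[] args) {
            // "foo" is an ordinary (non-secure) setting...
            Setting<String> foo = Setting.simpleString("foo", Property.NodeScope);

            // ...but here it is supplied through the (mock) keystore instead of elasticsearch.yml.
            MockSecureSettings secureSettings = new MockSecureSettings();
            secureSettings.setString("foo", "bar");
            Settings settings = Settings.builder().setSecureSettings(secureSettings).build();

            try {
                foo.get(settings);
            } catch (IllegalArgumentException e) {
                // Message says the setting "must be stored inside elasticsearch.yml".
                System.out.println(e.getMessage());
            }
        }
    }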
Downgrading an Elasticsearch node to an earlier version is unsupported, because we do not make any attempt to guarantee that a node can read any of the on-disk data written by a future version. Yet today we do not actively prevent downgrades, and sometimes users will attempt to roll back a failed upgrade with an in-place downgrade and get into an unrecoverable state. This change adds the current version of the node to the node metadata file, and checks the version found in this file against the current version at startup. If the node cannot be sure of its ability to read the on-disk data then it refuses to start, preserving any on-disk data in its upgraded state. This change also adds a command-line tool to overwrite the node metadata file without performing any version checks, to unsafely bypass these checks and recover the historical and lenient behaviour. --- docs/reference/commands/node-tool.asciidoc | 63 ++++++- .../ElasticsearchNodeCommand.java | 18 +- .../cluster/coordination/NodeToolCli.java | 2 + .../elasticsearch/env/NodeEnvironment.java | 11 +- .../org/elasticsearch/env/NodeMetaData.java | 72 ++++++-- .../env/NodeRepurposeCommand.java | 14 +- .../env/OverrideNodeVersionCommand.java | 103 ++++++++++++ .../gateway/MetaDataStateFormat.java | 4 +- .../elasticsearch/env/NodeEnvironmentIT.java | 37 +++++ .../elasticsearch/env/NodeMetaDataTests.java | 118 +++++++++++++ .../env/OverrideNodeVersionCommandTests.java | 155 ++++++++++++++++++ .../env/testReadsFormatWithoutVersion.binary | Bin 0 -> 71 bytes 12 files changed, 556 insertions(+), 41 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java create mode 100644 server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java create mode 100644 server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java create mode 100644 server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index f070d11aa8fb0..ed810a4dac014 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -4,14 +4,15 @@ The `elasticsearch-node` command enables you to perform certain unsafe operations on a node that are only possible while it is shut down. This command allows you to adjust the <> of a node and may be able to -recover some data after a disaster. +recover some data after a disaster or start a node even if it is incompatible +with the data on disk. [float] === Synopsis [source,shell] -------------------------------------------------- -bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster +bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version [--ordinal ] [-E ] [-h, --help] ([-s, --silent] | [-v, --verbose]) -------------------------------------------------- @@ -19,7 +20,7 @@ bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster [float] === Description -This tool has three modes: +This tool has four modes: * `elasticsearch-node repurpose` can be used to delete unwanted data from a node if it used to be a <> or a @@ -36,6 +37,11 @@ This tool has three modes: cluster bootstrapping was not possible, it also enables you to move nodes into a brand-new cluster. +* `elasticsearch-node override-version` enables you to start up a node + even if the data in the data path was written by an incompatible version of + {es}. 
This may sometimes allow you to downgrade to an earlier version of + {es}. + [[node-tool-repurpose]] [float] ==== Changing the role of a node @@ -109,6 +115,25 @@ way forward that does not risk data loss, but it may be possible to use the `elasticsearch-node` tool to construct a new cluster that contains some of the data from the failed cluster. +[[node-tool-override-version]] +[float] +==== Bypassing version checks + +The data that {es} writes to disk is designed to be read by the current version +and a limited set of future versions. It cannot generally be read by older +versions, nor by versions that are more than one major version newer. The data +stored on disk includes the version of the node that wrote it, and {es} checks +that it is compatible with this version when starting up. + +In rare circumstances it may be desirable to bypass this check and start up an +{es} node using data that was written by an incompatible version. This may not +work if the format of the stored data has changed, and it is a risky process +because it is possible for the format to change in ways that {es} may +misinterpret, silently leading to data loss. + +To bypass this check, you can use the `elasticsearch-node override-version` +tool to overwrite the version number stored in the data path with the current +version, causing {es} to believe that it is compatible with the on-disk data. [[node-tool-unsafe-bootstrap]] [float] @@ -262,6 +287,9 @@ one-node cluster. `detach-cluster`:: Specifies to unsafely detach this node from its cluster so it can join a different cluster. +`override-version`:: Overwrites the version number stored in the data path so +that a node can start despite being incompatible with the on-disk data. + `--ordinal `:: If there is <> then this specifies which node to target. Defaults to `0`, meaning to use the first node in the data path. @@ -423,3 +451,32 @@ Do you want to proceed? Confirm [y/N] y Node was successfully detached from the cluster ---- + +[float] +==== Bypassing version checks + +Run the `elasticsearch-node override-version` command to overwrite the version +stored in the data path so that a node can start despite being incompatible +with the data stored in the data path: + +[source, txt] +---- +node$ ./bin/elasticsearch-node override-version + + WARNING: Elasticsearch MUST be stopped before running this tool. + +This data path was last written by Elasticsearch version [x.x.x] and may no +longer be compatible with Elasticsearch version [y.y.y]. This tool will bypass +this compatibility check, allowing a version [y.y.y] node to start on this data +path, but a version [y.y.y] node may not be able to read this data or may read +it incorrectly leading to data loss. + +You should not use this tool. Instead, continue to use a version [x.x.x] node +on this data path. If necessary, you can use reindex-from-remote to copy the +data from here into an older cluster. + +Do you want to proceed? + +Confirm [y/N] y +Successfully overwrote this node's metadata to bypass its version compatibility checks. 
+---- diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 3d59e2bceacdb..ec664c97067d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -44,7 +44,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); protected final NamedXContentRegistry namedXContentRegistry; - static final String DELIMITER = "------------------------------------------------------------------------\n"; + protected static final String DELIMITER = "------------------------------------------------------------------------\n"; static final String STOP_WARNING_MSG = DELIMITER + @@ -81,9 +81,8 @@ protected void processNodePathsWithLock(Terminal terminal, OptionSet options, En throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); } processNodePaths(terminal, dataPaths, env); - } catch (LockObtainFailedException ex) { - throw new ElasticsearchException( - FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); + } catch (LockObtainFailedException e) { + throw new ElasticsearchException(FAILED_TO_OBTAIN_NODE_LOCK_MSG, e); } } @@ -177,6 +176,17 @@ protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newG MetaData.FORMAT.cleanupOldFiles(newGeneration, dataPaths); } + protected NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { + return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); + } + + private static NodeEnvironment.NodePath createNodePath(Path path) { + try { + return new NodeEnvironment.NodePath(path); + } catch (IOException e) { + throw new ElasticsearchException("Unable to investigate path [" + path + "]", e); + } + } //package-private for testing OptionParser getParser() { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index d6bd22bcd76fd..ff054e71eee3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -22,6 +22,7 @@ import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.env.NodeRepurposeCommand; +import org.elasticsearch.env.OverrideNodeVersionCommand; // NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging initialization // after LoggingAwareCommand instance is constructed. 
@@ -39,6 +40,7 @@ public NodeToolCli() { subcommands.put("repurpose", new NodeRepurposeCommand()); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); + subcommands.put("override-version", new OverrideNodeVersionCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index fc2f76d3436c0..4cfd22ecb1a65 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -31,6 +31,7 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedFunction; @@ -248,7 +249,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce sharedDataPath = null; locks = null; nodeLockId = -1; - nodeMetaData = new NodeMetaData(generateNodeId(settings)); + nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); return; } boolean success = false; @@ -393,7 +394,6 @@ private void maybeLogHeapDetails() { logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops); } - /** * scans the node paths and loads existing metaData file. If not found a new meta data will be generated * and persisted into the nodePaths @@ -403,10 +403,15 @@ private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger l final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); if (metaData == null) { - metaData = new NodeMetaData(generateNodeId(settings)); + metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); + } else { + metaData = metaData.upgradeToCurrentVersion(); } + // we write again to make sure all paths have the latest state file + assert metaData.nodeVersion().equals(Version.CURRENT) : metaData.nodeVersion() + " != " + Version.CURRENT; NodeMetaData.FORMAT.writeAndCleanup(metaData, paths); + return metaData; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java index dbea3164c8a44..f9deba8f6c382 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java @@ -19,6 +19,7 @@ package org.elasticsearch.env; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,66 +32,104 @@ import java.util.Objects; /** - * Metadata associated with this node. Currently only contains the unique uuid describing this node. + * Metadata associated with this node: its persistent node ID and its version. * The metadata is persisted in the data folder of this node and is reused across restarts. 
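+ * <p>
+ * A rough sketch of the two persisted fields, rendered as JSON for readability (the real file is
+ * written in a binary format via NodeMetaData.FORMAT, and the values shown here are illustrative):
+ * <pre>{"node_id": "y6VUVMSaStO4Tz-B5BxcOw", "node_version": 7020099}</pre>
+ * where node_version holds the Version.id of the node that last wrote the file.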
*/ public final class NodeMetaData { private static final String NODE_ID_KEY = "node_id"; + private static final String NODE_VERSION_KEY = "node_version"; private final String nodeId; - public NodeMetaData(final String nodeId) { + private final Version nodeVersion; + + public NodeMetaData(final String nodeId, final Version nodeVersion) { this.nodeId = Objects.requireNonNull(nodeId); + this.nodeVersion = Objects.requireNonNull(nodeVersion); } @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; NodeMetaData that = (NodeMetaData) o; - - return Objects.equals(this.nodeId, that.nodeId); + return nodeId.equals(that.nodeId) && + nodeVersion.equals(that.nodeVersion); } @Override public int hashCode() { - return this.nodeId.hashCode(); + return Objects.hash(nodeId, nodeVersion); } @Override public String toString() { - return "node_id [" + nodeId + "]"; + return "NodeMetaData{" + + "nodeId='" + nodeId + '\'' + + ", nodeVersion=" + nodeVersion + + '}'; } private static ObjectParser PARSER = new ObjectParser<>("node_meta_data", Builder::new); static { PARSER.declareString(Builder::setNodeId, new ParseField(NODE_ID_KEY)); + PARSER.declareInt(Builder::setNodeVersionId, new ParseField(NODE_VERSION_KEY)); } public String nodeId() { return nodeId; } + public Version nodeVersion() { + return nodeVersion; + } + + public NodeMetaData upgradeToCurrentVersion() { + if (nodeVersion.equals(Version.V_EMPTY)) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + return new NodeMetaData(nodeId, Version.CURRENT); + } + + if (nodeVersion.before(Version.CURRENT.minimumIndexCompatibilityVersion())) { + throw new IllegalStateException( + "cannot upgrade a node from version [" + nodeVersion + "] directly to version [" + Version.CURRENT + "]"); + } + + if (nodeVersion.after(Version.CURRENT)) { + throw new IllegalStateException( + "cannot downgrade a node from version [" + nodeVersion + "] to version [" + Version.CURRENT + "]"); + } + + return nodeVersion.equals(Version.CURRENT) ? 
this : new NodeMetaData(nodeId, Version.CURRENT); + } + private static class Builder { String nodeId; + Version nodeVersion; public void setNodeId(String nodeId) { this.nodeId = nodeId; } + public void setNodeVersionId(int nodeVersionId) { + this.nodeVersion = Version.fromId(nodeVersionId); + } + public NodeMetaData build() { - return new NodeMetaData(nodeId); + final Version nodeVersion; + if (this.nodeVersion == null) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + nodeVersion = Version.V_EMPTY; + } else { + nodeVersion = this.nodeVersion; + } + + return new NodeMetaData(nodeId, nodeVersion); } } - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("node-") { @Override @@ -103,10 +142,11 @@ protected XContentBuilder newXContentBuilder(XContentType type, OutputStream str @Override public void toXContent(XContentBuilder builder, NodeMetaData nodeMetaData) throws IOException { builder.field(NODE_ID_KEY, nodeMetaData.nodeId); + builder.field(NODE_VERSION_KEY, nodeMetaData.nodeVersion.id); } @Override - public NodeMetaData fromXContent(XContentParser parser) throws IOException { + public NodeMetaData fromXContent(XContentParser parser) { return PARSER.apply(parser, null).build(); } }; diff --git a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java index 7331d8528fc64..20b5552dfa8f8 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java @@ -172,10 +172,6 @@ private String toIndexName(NodeEnvironment.NodePath[] nodePaths, String uuid) { } } - private NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { - return Arrays.stream(dataPaths).map(NodeRepurposeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); - } - private Set indexUUIDsFor(Set indexPaths) { return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet()); } @@ -221,19 +217,11 @@ private void removePath(Path path) { @SafeVarargs @SuppressWarnings("varargs") - private final Set uniqueParentPaths(Collection... paths) { + private Set uniqueParentPaths(Collection... paths) { // equals on Path is good enough here due to the way these are collected. return Arrays.stream(paths).flatMap(Collection::stream).map(Path::getParent).collect(Collectors.toSet()); } - private static NodeEnvironment.NodePath createNodePath(Path path) { - try { - return new NodeEnvironment.NodePath(path); - } catch (IOException e) { - throw new ElasticsearchException("Unable to investigate path: " + path + ": " + e.getMessage()); - } - } - //package-private for testing OptionParser getParser() { return parser; diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java new file mode 100644 index 0000000000000..a46e185a25351 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import joptsimple.OptionParser; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; + +public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand { + private static final Logger logger = LogManager.getLogger(OverrideNodeVersionCommand.class); + + private static final String TOO_NEW_MESSAGE = + DELIMITER + + "\n" + + "This data path was last written by Elasticsearch version [V_NEW] and may no\n" + + "longer be compatible with Elasticsearch version [V_CUR]. This tool will bypass\n" + + "this compatibility check, allowing a version [V_CUR] node to start on this data\n" + + "path, but a version [V_CUR] node may not be able to read this data or may read\n" + + "it incorrectly leading to data loss.\n" + + "\n" + + "You should not use this tool. Instead, continue to use a version [V_NEW] node\n" + + "on this data path. If necessary, you can use reindex-from-remote to copy the\n" + + "data from here into an older cluster.\n" + + "\n" + + "Do you want to proceed?\n"; + + private static final String TOO_OLD_MESSAGE = + DELIMITER + + "\n" + + "This data path was last written by Elasticsearch version [V_OLD] which may be\n" + + "too old to be readable by Elasticsearch version [V_CUR]. This tool will bypass\n" + + "this compatibility check, allowing a version [V_CUR] node to start on this data\n" + + "path, but this version [V_CUR] node may not be able to read this data or may\n" + + "read it incorrectly leading to data loss.\n" + + "\n" + + "You should not use this tool. 
Instead, upgrade this data path from [V_OLD] to\n" + + "[V_CUR] using one or more intermediate versions of Elasticsearch.\n" + + "\n" + + "Do you want to proceed?\n"; + + static final String NO_METADATA_MESSAGE = "no node metadata found, so there is no version to override"; + static final String SUCCESS_MESSAGE = "Successfully overwrote this node's metadata to bypass its version compatibility checks."; + + public OverrideNodeVersionCommand() { + super("Overwrite the version stored in this node's data path with [" + Version.CURRENT + + "] to bypass the version compatibility checks"); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + final Path[] nodePaths = Arrays.stream(toNodePaths(dataPaths)).map(p -> p.path).toArray(Path[]::new); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodePaths); + if (nodeMetaData == null) { + throw new ElasticsearchException(NO_METADATA_MESSAGE); + } + + try { + nodeMetaData.upgradeToCurrentVersion(); + throw new ElasticsearchException("found [" + nodeMetaData + "] which is compatible with current version [" + Version.CURRENT + + "], so there is no need to override the version checks"); + } catch (IllegalStateException e) { + // ok, means the version change is not supported + } + + confirm(terminal, (nodeMetaData.nodeVersion().before(Version.CURRENT) ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE) + .replace("V_OLD", nodeMetaData.nodeVersion().toString()) + .replace("V_NEW", nodeMetaData.nodeVersion().toString()) + .replace("V_CUR", Version.CURRENT.toString())); + + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeMetaData.nodeId(), Version.CURRENT), nodePaths); + + terminal.println(SUCCESS_MESSAGE); + } + + //package-private for testing + OptionParser getParser() { + return parser; + } +} diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 3f28fead29439..d5dbfe828665f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -382,7 +382,7 @@ private List findStateFilesByGeneration(final long generation, Path... loc return files; } - private String getStateFileName(long generation) { + public String getStateFileName(long generation) { return prefix + generation + STATE_FILE_EXTENSION; } @@ -466,7 +466,7 @@ public static void deleteMetaState(Path... 
dataLocations) throws IOException { IOUtils.rm(stateDirectories); } - String getPrefix() { + public String getPrefix() { return prefix; } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 36f75c79a1792..37e260a01d069 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -19,12 +19,18 @@ package org.elasticsearch.env; +import org.elasticsearch.Version; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.startsWith; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -86,4 +92,35 @@ public Settings onNodeStopped(String nodeName) { + Node.NODE_DATA_SETTING.getKey() + "=false, but has shard data")); } + + private IllegalStateException expectThrowsOnRestart(CheckedConsumer onNodeStopped) { + internalCluster().startNode(); + final Path[] dataPaths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); + return expectThrows(IllegalStateException.class, + () -> internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + try { + onNodeStopped.accept(dataPaths); + } catch (Exception e) { + throw new AssertionError(e); + } + return Settings.EMPTY; + } + })); + } + + public void testFailsToStartIfDowngraded() { + final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooNewVersion()), dataPaths)); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); + } + + public void testFailsToStartIfUpgradedTooFar() { + final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooOldVersion()), dataPaths)); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); + } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java new file mode 100644 index 0000000000000..59cf6247f9613 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import org.elasticsearch.Version; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; + +public class NodeMetaDataTests extends ESTestCase { + private Version randomVersion() { + // VersionUtils.randomVersion() only returns known versions, which are necessarily no later than Version.CURRENT; however we also + // want to consider our behaviour with all versions, so we occasionally pick a truly random version. + return rarely() ? Version.fromId(randomInt()) : VersionUtils.randomVersion(random()); + } + + public void testEqualsHashcodeSerialization() { + final Path tempDir = createTempDir(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(new NodeMetaData(randomAlphaOfLength(10), randomVersion()), + nodeMetaData -> { + final long generation = NodeMetaData.FORMAT.writeAndCleanup(nodeMetaData, tempDir); + final Tuple nodeMetaDataLongTuple + = NodeMetaData.FORMAT.loadLatestStateWithGeneration(logger, xContentRegistry(), tempDir); + assertThat(nodeMetaDataLongTuple.v2(), equalTo(generation)); + return nodeMetaDataLongTuple.v1(); + }, nodeMetaData -> { + if (randomBoolean()) { + return new NodeMetaData(randomAlphaOfLength(21 - nodeMetaData.nodeId().length()), nodeMetaData.nodeVersion()); + } else { + return new NodeMetaData(nodeMetaData.nodeId(), randomValueOtherThan(nodeMetaData.nodeVersion(), this::randomVersion)); + } + }); + } + + public void testReadsFormatWithoutVersion() throws IOException { + // the behaviour tested here is only appropriate if the current version is compatible with versions 7 and earlier + assertTrue(Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_7_0_0)); + // when the current version is incompatible with version 7, the behaviour should change to reject files like the given resource + // which do not have the version field + + final Path tempDir = createTempDir(); + final Path stateDir = Files.createDirectory(tempDir.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + final InputStream resource = this.getClass().getResourceAsStream("testReadsFormatWithoutVersion.binary"); + assertThat(resource, notNullValue()); + Files.copy(resource, stateDir.resolve(NodeMetaData.FORMAT.getStateFileName(between(0, Integer.MAX_VALUE)))); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), tempDir); + assertThat(nodeMetaData.nodeId(), equalTo("y6VUVMSaStO4Tz-B5BxcOw")); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.V_EMPTY)); + } + + public void testUpgradesLegitimateVersions() { + final String nodeId =
randomAlphaOfLength(10); + final NodeMetaData nodeMetaData = new NodeMetaData(nodeId, + randomValueOtherThanMany(v -> v.after(Version.CURRENT) || v.before(Version.CURRENT.minimumIndexCompatibilityVersion()), + this::randomVersion)).upgradeToCurrentVersion(); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + } + + public void testUpgradesMissingVersion() { + final String nodeId = randomAlphaOfLength(10); + final NodeMetaData nodeMetaData = new NodeMetaData(nodeId, Version.V_EMPTY).upgradeToCurrentVersion(); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + } + + public void testDoesNotUpgradeFutureVersion() { + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> new NodeMetaData(randomAlphaOfLength(10), tooNewVersion()) + .upgradeToCurrentVersion()); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); + } + + public void testDoesNotUpgradeAncientVersion() { + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> new NodeMetaData(randomAlphaOfLength(10), tooOldVersion()).upgradeToCurrentVersion()); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); + } + + public static Version tooNewVersion() { + return Version.fromId(between(Version.CURRENT.id + 1, 99999999)); + } + + public static Version tooOldVersion() { + return Version.fromId(between(1, Version.CURRENT.minimumIndexCompatibilityVersion().id - 1)); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java new file mode 100644 index 0000000000000..704617c7b5e95 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.env; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.WriteStateException; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class OverrideNodeVersionCommandTests extends ESTestCase { + + private Environment environment; + private Path[] nodePaths; + + @Before + public void createNodePaths() throws IOException { + final Settings settings = buildEnvSettings(Settings.EMPTY); + environment = TestEnvironment.newEnvironment(settings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, environment)) { + nodePaths = nodeEnvironment.nodeDataPaths(); + } + } + + public void testFailsOnEmptyPath() { + final Path emptyPath = createTempDir(); + final MockTerminal mockTerminal = new MockTerminal(); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, new Path[]{emptyPath}, environment)); + assertThat(elasticsearchException.getMessage(), equalTo(OverrideNodeVersionCommand.NO_METADATA_MESSAGE)); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + } + + public void testFailsIfUnnecessary() throws WriteStateException { + final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumIndexCompatibilityVersion().id, Version.CURRENT.id)); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), allOf( + containsString("compatible with current version"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + } + + public void testWarnsIfTooOld() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput("n\n"); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); + assertThat(mockTerminal.getOutput(), allOf( + containsString("too old"), + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), 
equalTo(nodeVersion)); + } + + public void testWarnsIfTooNew() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); + assertThat(mockTerminal.getOutput(), allOf( + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(nodeVersion)); + } + + public void testOverwritesIfTooOld() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("y", "Y")); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + assertThat(mockTerminal.getOutput(), allOf( + containsString("too old"), + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()), + containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + } + + public void testOverwritesIfTooNew() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("y", "Y")); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + assertThat(mockTerminal.getOutput(), allOf( + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()), + containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + } +} diff --git a/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary 
b/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary new file mode 100644 index 0000000000000000000000000000000000000000..3a8bb297e7449461f9193810654025a61ae891da GIT binary patch literal 71 zcmcD&o+Hj$T#{Il%D}+D2*OsHT&%y^^72zs<1BPLaKC~Or0u{ U{mXwJ(3t!Js2B_;><`@o0PKks=>Px# literal 0 HcmV?d00001 From 49bd667cbd156fbd588d867c40f8b760c47281e1 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 21 May 2019 10:59:52 +0300 Subject: [PATCH 140/321] Bump version in BWC check after backport VERSION_HASHED_TOKENS needs to point to 7.2, after #42220 was merged --- .../org/elasticsearch/xpack/security/authc/TokenService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index ec5086201c68e..a8f68870556e6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -189,7 +189,7 @@ public final class TokenService { static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); - static final Version VERSION_HASHED_TOKENS = Version.V_8_0_0; + static final Version VERSION_HASHED_TOKENS = Version.V_7_2_0; static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_2_0; static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_2_0; static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_2_0; From 2c8aa0e6130efdb75e9248a251c6f6dbedd72d8d Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 21 May 2019 12:42:41 +0300 Subject: [PATCH 141/321] Fix version in tests since #41906 was merged --- .../org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java index 03c28c05e616b..035e29ccf771c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java @@ -34,7 +34,7 @@ public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase { Version.fromString(System.getProperty("tests.upgrade_from_version")); public void testDateHistoIntervalUpgrade() throws Exception { - assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_8_0_0)); // TODO change this after backport + assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); switch (CLUSTER_TYPE) { case OLD: break; From 6f8dfeb6b5462f4f129b6c223d99ed6a600cd450 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Tue, 21 May 2019 06:36:38 -0400 Subject: [PATCH 142/321] Add experimental and warnings to vector functions (#42205) --- docs/reference/query-dsl/script-score-query.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc 
index 42d4a7b1517e3..e8d97a31fa95f 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -78,10 +78,18 @@ to be the most efficient by using the internal mechanisms. [[vector-functions]] ===== Functions for vector fields + +experimental[] + These functions are used for <> and <> fields. +NOTE: During vector functions' calculation, all matched documents are +linearly scanned. Thus, expect the query time to grow linearly +with the number of matched documents. For this reason, we recommend +limiting the number of matched documents with a `query` parameter. + For dense_vector fields, `cosineSimilarity` calculates the measure of cosine similarity between a given query vector and document vectors. From e27035a1973235d36cb8e2b522782913d81b5676 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Tue, 21 May 2019 13:49:42 +0300 Subject: [PATCH 143/321] Use separate testkit dir for each run (#42013) Gradle Testkit reuses the testkit dir by default between tests. With this change we use a temporary one for each run, hoping it will fix #41431 --- ...portElasticsearchBuildResourcesTaskIT.java | 34 +++++++------------ .../gradle/precommit/JarHellTaskIT.java | 5 +-- .../test/GradleIntegrationTestCase.java | 16 ++++++++- .../testclusters/TestClustersPluginIT.java | 14 +++++--- 4 files changed, 38 insertions(+), 31 deletions(-) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 99afd0bcbe0ae..7968f4f57cf90 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTestCase { @@ -29,25 +28,19 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe public static final String PROJECT_NAME = "elasticsearch-build-resources"; public void testUpToDateWithSourcesConfigured() { - GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + getGradleRunner(PROJECT_NAME) .withArguments("clean", "-s") - .withPluginClasspath() .build(); - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); - result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskUpToDate(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); } public void testImplicitTaskDependencyCopy() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME)
.withArguments("clean", "sampleCopyAll", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); @@ -69,10 +60,8 @@ public void testImplicitTaskDependencyCopy() { } public void testImplicitTaskDependencyInputFileOfOther() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("clean", "sample", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":sample"); @@ -81,11 +70,12 @@ public void testImplicitTaskDependencyInputFileOfOther() { } public void testIncorrectUsage() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) - .withArguments("noConfigAfterExecution", "-s", "-i") - .withPluginClasspath() - .buildAndFail(); - assertOutputContains("buildResources can't be configured after the task ran"); + assertOutputContains( + getGradleRunner(PROJECT_NAME) + .withArguments("noConfigAfterExecution", "-s", "-i") + .buildAndFail() + .getOutput(), + "buildResources can't be configured after the task ran" + ); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java index e5624a15d92df..d45028d844542 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -2,7 +2,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; /* * Licensed to Elasticsearch under one or more contributor @@ -25,10 +24,8 @@ public class JarHellTaskIT extends GradleIntegrationTestCase { public void testJarHellDetected() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("jarHell")) + BuildResult result = getGradleRunner("jarHell") .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) - .withPluginClasspath() .buildAndFail(); assertTaskFailed(result, ":jarHell"); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f7a0382cec775..46a9194780c2a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -4,8 +4,12 @@ import org.gradle.testkit.runner.BuildTask; import org.gradle.testkit.runner.GradleRunner; import org.gradle.testkit.runner.TaskOutcome; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; @@ -16,6 +20,9 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { + @Rule + public TemporaryFolder testkitTmpDir = new TemporaryFolder(); + protected File getProjectDir(String name) { File root = new File("src/testKit/"); if (root.exists() == false) { @@ -26,9 +33,16 @@ protected File getProjectDir(String name) { } protected GradleRunner getGradleRunner(String sampleProject) { + File testkit; + try { + testkit = testkitTmpDir.newFolder(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return GradleRunner.create() 
.withProjectDir(getProjectDir(sampleProject)) - .withPluginClasspath(); + .withPluginClasspath() + .withTestKitDir(testkit); } protected File getBuildDir(String name) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 84b13340c35cf..c9086d1459afd 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,12 +21,21 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Before; import org.junit.Ignore; + import java.util.Arrays; public class TestClustersPluginIT extends GradleIntegrationTestCase { + private GradleRunner runner; + + @Before + public void setUp() throws Exception { + runner = getGradleRunner("testclusters"); + } + public void testListClusters() { BuildResult result = getTestClustersRunner("listTestClusters").build(); @@ -190,10 +199,7 @@ private GradleRunner getTestClustersRunner(String... tasks) { arguments[tasks.length] = "-s"; arguments[tasks.length + 1] = "-i"; arguments[tasks.length + 2] = "-Dlocal.repo.path=" + getLocalTestRepoPath(); - return GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments(arguments) - .withPluginClasspath(); + return runner.withArguments(arguments); } private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) { From e126d18859cae32d4d507ad331d6dac26e1d8980 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 21 May 2019 06:46:56 -0500 Subject: [PATCH 144/321] add 7.1.1 and 6.8.1 versions (#42253) --- server/src/main/java/org/elasticsearch/Version.java | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index dacb28f90b351..c881c105cabb0 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -94,10 +94,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_7_2_ID = 6070299; public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_3_ID = 6070399; - public static final Version V_6_7_3 = new Version(V_6_7_3_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_8_1_ID = 6080199; + public static final Version V_6_8_1 = new Version(V_6_8_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; @@ -106,6 +106,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, 
org.apache.lucene.util.Version.LUCENE_8_0_0); +public static final int V_7_1_1_ID = 7010199; +public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_8_0_0_ID = 8000099; @@ -128,6 +130,8 @@ public static Version fromId(int id) { return V_8_0_0; case V_7_2_0_ID: return V_7_2_0; + case V_7_1_1_ID: + return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; case V_7_0_2_ID: @@ -136,10 +140,10 @@ return V_7_0_1; case V_7_0_0_ID: return V_7_0_0; + case V_6_8_1_ID: + return V_6_8_1; case V_6_8_0_ID: return V_6_8_0; - case V_6_7_3_ID: - return V_6_7_3; case V_6_7_1_ID: return V_6_7_1; case V_6_7_2_ID: From 026c96d74f8c986d5f2d6dc91144f86d57000ea3 Mon Sep 17 00:00:00 2001 From: Kamyar Ghajar Date: Tue, 21 May 2019 17:18:40 +0430 Subject: [PATCH 145/321] Add missing comma in code section (#41678) The missing comma in the code section (line 114) is added to the doc file. --- docs/reference/search/rank-eval.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index b25048fec90e6..0ec2e070b1c74 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -111,7 +111,7 @@ GET /my_index/_rank_eval ], "requests": [ { - "id": "amsterdam_query" + "id": "amsterdam_query", "ratings": [ ... ], "template_id": "match_one_field_query", <3> "params": { <4> From c8974045aeffea6b5b1edf5297508156b5c288c2 Mon Sep 17 00:00:00 2001 From: Glen Smith Date: Tue, 21 May 2019 14:59:40 +0200 Subject: [PATCH 146/321] Remove stray back tick that's messing up table format (#41705) --- docs/reference/cat/thread_pool.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index d1ea1fad88515..03854fae2f61f 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -59,7 +59,7 @@ ml_autodetect (default distro only) ml_datafeed (default distro only) ml_utility (default distro only) refresh -rollup_indexing (default distro only)` +rollup_indexing (default distro only) search security-token-key (default distro only) snapshot From 1911d2af7c993d4709b9c3bf0d2769c59cdb7d0e Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Tue, 21 May 2019 16:37:11 +0300 Subject: [PATCH 147/321] Move the FIPS configuration back to the build plugin (#41989) * Move the FIPS configuration back to the build plugin This is necessary for external users of build-tools.
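As a rough sketch of what this means for such a build (the plugin id and the `inFipsJvm` property wiring below are illustrative assumptions, not taken from this change), a build that applies the build plugin now inherits the FIPS test configuration instead of duplicating the snippet that the root build.gradle used to carry:

[source,groovy]
----
// Hypothetical external plugin build script that consumes build-tools.
apply plugin: 'elasticsearch.build'

// Configuration along these lines should no longer be needed in the
// consuming build, because the build plugin applies it when inFipsJvm is set:
if (project.ext.has('inFipsJvm') && project.ext.inFipsJvm) {
    tasks.withType(Test) {
        systemProperty 'javax.net.ssl.trustStorePassword', 'password'
        systemProperty 'javax.net.ssl.keyStorePassword', 'password'
    }
}
----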
Closes #41721 --- build.gradle | 15 --------------- .../org/elasticsearch/gradle/BuildPlugin.groovy | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/build.gradle b/build.gradle index bb75439bcae4e..8794a1f930523 100644 --- a/build.gradle +++ b/build.gradle @@ -619,21 +619,6 @@ allprojects { } } -subprojects { - // Common config when running with a FIPS-140 runtime JVM - if (project.ext.has("inFipsJvm") && project.ext.inFipsJvm) { - tasks.withType(Test) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - project.pluginManager.withPlugin("elasticsearch.testclusters") { - project.testClusters.all { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - } - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 57a35052a3e80..51300ffc628c9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -116,6 +116,22 @@ class BuildPlugin implements Plugin { configureTestTasks(project) configurePrecommit(project) configureDependenciesInfo(project) + + // Common config when running with a FIPS-140 runtime JVM + // Need to do it here to support external plugins + if (project.ext.inFipsJvm) { + project.tasks.withType(Test) { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + project.pluginManager.withPlugin("elasticsearch.testclusters") { + project.testClusters.all { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + } + } + } From 455cf8bbb49142ed4886b4f580d8fcf6b8e4d4ad Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 10:25:23 -0400 Subject: [PATCH 148/321] Mute all ml_datafeed_crud rolling upgrade tests AwaitsFix https://github.com/elastic/elasticsearch/issues/42258 --- .../rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml | 5 +++++ .../rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml | 5 +++++ .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index e453014258a24..4d732015d47f4 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Test old cluster datafeed without aggs": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 8347e09e0c17d..2a7b56adb9a16 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ 
+setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Put job and datafeed without aggs in old cluster": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 5dc71ecb0679e..4b742e10de61f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,4 +1,8 @@ setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + - do: cluster.health: wait_for_status: green From aac74733b75e8a6e467e129e74fc33c68fa33bce Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Tue, 21 May 2019 15:51:55 +0200 Subject: [PATCH 149/321] Remove 7.0.2 (#42282) 7.0.2 removed, since it will never be, fixing branch consistency check. --- server/src/main/java/org/elasticsearch/Version.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c881c105cabb0..0a6b19444efa7 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -102,8 +102,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final int V_7_0_2_ID = 7000299; - public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_1_ID = 7010199; @@ -134,8 +132,6 @@ public static Version fromId(int id) { return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; - case V_7_0_2_ID: - return V_7_0_2; case V_7_0_1_ID: return V_7_0_1; case V_7_0_0_ID: From b8be2d05393192e1ce8951d4f1552202eea7d470 Mon Sep 17 00:00:00 2001 From: MK Swanson Date: Tue, 21 May 2019 11:20:36 -0400 Subject: [PATCH 150/321] [DOCS] Copied note on slicing support to Slicing section. Closes 26114 (#40426) --- docs/reference/docs/reindex.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index b085e081b4dd7..e96c262d67bb4 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -982,6 +982,10 @@ Reindex supports <> to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. +NOTE: Reindexing from remote clusters does not support +<> or +<>. + [float] [[docs-reindex-manual-slice]] ==== Manual slicing From 78259f75de92e41623ec6dd02f9772b5340c86ae Mon Sep 17 00:00:00 2001 From: Henning Andersen Date: Tue, 21 May 2019 17:23:14 +0200 Subject: [PATCH 151/321] Revert "Remove 7.0.2 (#42282)" This reverts commit aac74733b75e8a6e467e129e74fc33c68fa33bce. 
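The revert restores the V_7_0_2 constant, whose integer id follows the convention used for every id in Version.java: major * 1,000,000 + minor * 10,000 + revision * 100, with 99 in the trailing digits marking a release build, so 7000299 reads as 7.0.2. A minimal sketch of that convention, derived only from the constants visible in these patches; encodeVersionId and decodeVersionId are illustrative helpers, not methods of the Version class:

// Illustrative helpers for the integer id scheme visible in Version.java;
// not part of this commit. 99 in the trailing digits marks a release build.
static int encodeVersionId(int major, int minor, int revision) {
    return major * 1_000_000 + minor * 10_000 + revision * 100 + 99;
}

static String decodeVersionId(int id) {
    int major = id / 1_000_000;                  // 7000299 -> 7
    int minor = (id % 1_000_000) / 10_000;       // 7000299 -> 0
    int revision = (id % 10_000) / 100;          // 7000299 -> 2
    return major + "." + minor + "." + revision; // "7.0.2"
}

The same reading checks out against the other constants in this series: V_7_1_1_ID = 7010199 decodes to 7.1.1 and V_7_2_0_ID = 7020099 to 7.2.0.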
--- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0a6b19444efa7..c881c105cabb0 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -102,6 +102,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_0_2_ID = 7000299; + public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_1_ID = 7010199; @@ -132,6 +134,8 @@ public static Version fromId(int id) { return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; + case V_7_0_2_ID: + return V_7_0_2; case V_7_0_1_ID: return V_7_0_1; case V_7_0_0_ID: From a2c35a7f9b3f5a6d7cf2271c2395c07451cc7585 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Tue, 21 May 2019 15:51:55 +0200 Subject: [PATCH 152/321] Remove 7.0.2 (#42282) 7.0.2 removed, since it will never be, fixing branch consistency check. --- server/src/main/java/org/elasticsearch/Version.java | 4 ---- .../authc/support/mapper/ExpressionRoleMappingTests.java | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c881c105cabb0..0a6b19444efa7 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -102,8 +102,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final int V_7_0_2_ID = 7000299; - public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_1_ID = 7010199; @@ -134,8 +132,6 @@ public static Version fromId(int id) { return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; - case V_7_0_2_ID: - return V_7_0_2; case V_7_0_1_ID: return V_7_0_1; case V_7_0_0_ID: diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index e0ab888f4b4bb..e93b42a6d3a04 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -304,7 +304,7 @@ public void testSerialization() throws 
Exception { public void testSerializationPreV71() throws Exception { final ExpressionRoleMapping original = randomRoleMapping(false); - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_0_2); + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_0_1); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(version); original.writeTo(output); From 753726c8c556463a8ccf8872223a9d64156da772 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 21 May 2019 19:22:13 +0200 Subject: [PATCH 153/321] Safer Wait for Snapshot Success in ClusterPrivilegeTests (#40943) * Safer Wait for Snapshot Success in ClusterPrivilegeTests * The snapshot state returned by the API might become SUCCESS before it's fully removed from the cluster state. * We should fix this race in the transport API but it's not trivial and will be part of the incoming big round of refactoring the repository interaction, this added check fixes the test for now * closes #38030 --- .../elasticsearch/integration/ClusterPrivilegeTests.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java index 2ceb14a172fe4..384401edaf510 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -6,12 +6,14 @@ package org.elasticsearch.integration; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.client.Request; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -139,7 +141,6 @@ public void testThatClusterPrivilegesWorkAsExpectedViaHttp() throws Exception { assertAccessIsDenied("user_d", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/38030") public void testThatSnapshotAndRestore() throws Exception { String repoJson = Strings.toString(jsonBuilder().startObject().field("type", "fs").startObject("settings").field("location", repositoryLocation.toString()).endObject().endObject()); @@ -203,6 +204,11 @@ private void waitForSnapshotToFinish(String repo, String snapshot) throws Except assertBusy(() -> { SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); + // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for + // it to disappear from the cluster state as well + SnapshotsInProgress snapshotsInProgress = + client().admin().cluster().state(new ClusterStateRequest()).get().getState().custom(SnapshotsInProgress.TYPE); + assertThat(snapshotsInProgress.entries(), Matchers.empty()); }); } } From 
8907dc9598667a1fa29be0ba22c7030ebee1101b Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 21 May 2019 10:38:40 -0700 Subject: [PATCH 154/321] mute failing filerealm hash caching tests (#42304) some tests are failing after the introduction of #41792. relates #42267 and #42289. --- .../elasticsearch/xpack/security/authc/file/FileRealmTests.java | 1 + .../authc/support/CachingUsernamePasswordRealmTests.java | 1 + 2 files changed, 2 insertions(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index 9d05495449805..168f608951e09 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -90,6 +90,7 @@ private RealmConfig getRealmConfig(Settings settings) { return new RealmConfig(REALM_IDENTIFIER, settings, TestEnvironment.newEnvironment(settings), threadContext); } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 2fed720e23c09..8b30cb85fed78 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -64,6 +64,7 @@ public void stop() { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testCacheSettings() { String cachingHashAlgo = Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT); int maxUsers = randomIntBetween(10, 100); From 7c503ce20d762b82da975764482cc6eace542fa2 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 21 May 2019 18:40:04 +0100 Subject: [PATCH 155/321] [ML Data Frame] Persist and restore checkpoint and position (#41942) Persist and restore Data frame's current checkpoint and position --- .../DataFrameIndexerTransformStats.java | 9 -- .../DataFrameTransformProgress.java | 6 +- .../transforms/DataFrameTransformState.java | 12 +-- .../DataFrameTransformStateAndStats.java | 25 ++++- .../core/indexing/AsyncTwoPhaseIndexer.java | 8 +- .../integration/DataFrameRestTestCase.java | 2 +- .../integration/DataFrameUsageIT.java | 8 +- .../xpack/dataframe/DataFrameFeatureSet.java | 8 +- ...portGetDataFrameTransformsStatsAction.java | 82 ++------------ ...portStartDataFrameTransformTaskAction.java | 2 +- .../persistence/DataFrameInternalIndex.java | 102 ++++++++++++------ .../DataFrameTransformsConfigManager.java | 53 +++++++-- ...FrameTransformPersistentTasksExecutor.java | 51 +++++---- .../transforms/DataFrameTransformTask.java | 35 +++++- ...DataFrameTransformsConfigManagerTests.java | 41 +++++++ .../test/data_frame/transforms_start_stop.yml | 48 +-------- .../test/data_frame/transforms_stats.yml | 6 +- 17 files changed, 288 insertions(+), 210 deletions(-) diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java index c2981c40dfdc1..8f83fd375490d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java @@ -109,15 +109,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(transformId); } - /** - * Get the persisted stats document name from the Data Frame Transformer Id. - * - * @return The id of document the where the transform stats are persisted - */ - public static String documentId(String transformId) { - return NAME + "-" + transformId; - } - @Nullable public String getTransformId() { return transformId; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java index 5b7346bca2a38..0741be296ed4d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java @@ -23,9 +23,9 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { - private static final ParseField TOTAL_DOCS = new ParseField("total_docs"); - private static final ParseField DOCS_REMAINING = new ParseField("docs_remaining"); - private static final String PERCENT_COMPLETE = "percent_complete"; + public static final ParseField TOTAL_DOCS = new ParseField("total_docs"); + public static final ParseField DOCS_REMAINING = new ParseField("docs_remaining"); + public static final String PERCENT_COMPLETE = "percent_complete"; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_frame_transform_progress", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java index bc1b710cd2e6f..d4480caa0b9a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java @@ -42,12 +42,12 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState @Nullable private final String reason; - private static final ParseField TASK_STATE = new ParseField("task_state"); - private static final ParseField INDEXER_STATE = new ParseField("indexer_state"); - private static final ParseField CURRENT_POSITION = new ParseField("current_position"); - private static final ParseField CHECKPOINT = new ParseField("checkpoint"); - private static final ParseField REASON = new ParseField("reason"); - private static final ParseField PROGRESS = new ParseField("progress"); + public static final ParseField TASK_STATE = new ParseField("task_state"); + public static final ParseField INDEXER_STATE = new ParseField("indexer_state"); + public static final ParseField CURRENT_POSITION = new ParseField("current_position"); + 
public static final ParseField CHECKPOINT = new ParseField("checkpoint"); + public static final ParseField REASON = new ParseField("reason"); + public static final ParseField PROGRESS = new ParseField("progress"); @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java index 2a145ba260f4e..d28d64bdb1e82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -14,6 +15,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -22,7 +24,7 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObject { - private static final String NAME = "data_frame_transform_state_and_stats"; + public static final String NAME = "data_frame_transform_state_and_stats"; public static final ParseField STATE_FIELD = new ParseField("state"); public static final ParseField CHECKPOINTING_INFO_FIELD = new ParseField("checkpointing"); @@ -47,6 +49,10 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj (p, c) -> DataFrameTransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD); } + public static DataFrameTransformStateAndStats fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + public static DataFrameTransformStateAndStats initialStateAndStats(String id) { return initialStateAndStats(id, new DataFrameIndexerTransformStats(id)); } @@ -58,6 +64,15 @@ public static DataFrameTransformStateAndStats initialStateAndStats(String id, Da DataFrameTransformCheckpointingInfo.EMPTY); } + /** + * Get the persisted state and stats document name from the Data Frame Transform Id. 
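+ * For example, with NAME = "data_frame_transform_state_and_stats", calling
+ * documentId("my-transform") yields "data_frame_transform_state_and_stats-my-transform"
+ * (the transform id "my-transform" is illustrative only).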
+ *
+ * @return The id of the document where the transform stats are persisted
+ */
+ public static String documentId(String transformId) {
+ return NAME + "-" + transformId;
+ }
+
 public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats,
 DataFrameTransformCheckpointingInfo checkpointingInfo) {
 this.id = Objects.requireNonNull(id);
@@ -73,6 +88,11 @@ public DataFrameTransformStateAndStats(StreamInput in) throws IOException {
 this.checkpointingInfo = new DataFrameTransformCheckpointingInfo(in);
 }

+ @Nullable
+ public String getTransformId() {
+ return transformStats.getTransformId();
+ }
+
 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
 builder.startObject();
@@ -80,6 +100,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 builder.field(STATE_FIELD.getPreferredName(), transformState, params);
 builder.field(DataFrameField.STATS_FIELD.getPreferredName(), transformStats, params);
 builder.field(CHECKPOINTING_INFO_FIELD.getPreferredName(), checkpointingInfo, params);
+ if (params.paramAsBoolean(DataFrameField.FOR_INTERNAL_STORAGE, false)) {
+ builder.field(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), NAME);
+ }
 builder.endObject();
 return builder;
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
index ccf075b13ae5a..80b0378ae35ff 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
@@ -16,6 +16,7 @@
 import java.util.List;
 import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;

 /**
@@ -94,16 +95,21 @@ public synchronized IndexerState start() {
 * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted).
*/ public synchronized IndexerState stop() { + AtomicBoolean wasStartedAndSetStopped = new AtomicBoolean(false); IndexerState currentState = state.updateAndGet(previousState -> { if (previousState == IndexerState.INDEXING) { return IndexerState.STOPPING; } else if (previousState == IndexerState.STARTED) { - onStop(); + wasStartedAndSetStopped.set(true); return IndexerState.STOPPED; } else { return previousState; } }); + + if (wasStartedAndSetStopped.get()) { + onStop(); + } return currentState; } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index db07e8513cc2d..7ffa5391b7a4a 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -292,7 +292,7 @@ public static void removeIndices() throws Exception { wipeIndices(); } - public void wipeDataFrameTransforms() throws IOException, InterruptedException { + public void wipeDataFrameTransforms() throws IOException { List> transformConfigs = getDataFrameTransforms(); for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 24ce173b37567..4f209c5a9f3f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -72,7 +72,7 @@ public void testUsage() throws Exception { Request statsExistsRequest = new Request("GET", DataFrameInternalIndex.INDEX_NAME+"/_search?q=" + INDEX_DOC_TYPE.getPreferredName() + ":" + - DataFrameIndexerTransformStats.NAME); + DataFrameTransformStateAndStats.NAME); // Verify that we have our two stats documents assertBusy(() -> { Map hasStatsMap = entityAsMap(client().performRequest(statsExistsRequest)); @@ -100,7 +100,6 @@ public void testUsage() throws Exception { expectedStats.merge(statName, statistic, Integer::sum); } - usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); usageAsMap = entityAsMap(usageResponse); @@ -109,7 +108,8 @@ public void testUsage() throws Exception { assertEquals(1, XContentMapValues.extractValue("data_frame.transforms.started", usageAsMap)); assertEquals(2, XContentMapValues.extractValue("data_frame.transforms.stopped", usageAsMap)); for(String statName : PROVIDED_STATS) { - assertEquals(expectedStats.get(statName), 
XContentMapValues.extractValue("data_frame.stats."+statName, usageAsMap)); + assertEquals("Incorrect stat " + statName, + expectedStats.get(statName), XContentMapValues.extractValue("data_frame.stats." + statName, usageAsMap)); } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java index 029fe88766df5..82b8a6060e44e 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; @@ -176,6 +177,7 @@ static DataFrameIndexerTransformStats parseSearchAggs(SearchResponse searchRespo for(String statName : PROVIDED_STATS) { Aggregation agg = searchResponse.getAggregations().get(statName); + if (agg instanceof NumericMetricsAggregation.SingleValue) { statisticsList.add((long)((NumericMetricsAggregation.SingleValue)agg).value()); } else { @@ -197,14 +199,15 @@ static DataFrameIndexerTransformStats parseSearchAggs(SearchResponse searchRespo static void getStatisticSummations(Client client, ActionListener statsListener) { QueryBuilder queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), - DataFrameIndexerTransformStats.NAME))); + DataFrameTransformStateAndStats.NAME))); SearchRequestBuilder requestBuilder = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) .setSize(0) .setQuery(queryBuilder); + final String path = DataFrameField.STATS_FIELD.getPreferredName() + "."; for(String statName : PROVIDED_STATS) { - requestBuilder.addAggregation(AggregationBuilders.sum(statName).field(statName)); + requestBuilder.addAggregation(AggregationBuilders.sum(statName).field(path + statName)); } ActionListener getStatisticSummationsListener = ActionListener.wrap( @@ -213,6 +216,7 @@ static void getStatisticSummations(Client client, ActionListener { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index bb01da4c7e50a..df2d09a875d19 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -9,51 +9,29 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import 
org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Request; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; -import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -69,18 +47,16 @@ public class TransportGetDataFrameTransformsStatsAction extends private static final Logger logger = LogManager.getLogger(TransportGetDataFrameTransformsStatsAction.class); - private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; private final DataFrameTransformsCheckpointService transformsCheckpointService; @Inject public TransportGetDataFrameTransformsStatsAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, Client client, + ClusterService clusterService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, DataFrameTransformsCheckpointService transformsCheckpointService) { super(GetDataFrameTransformsStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); - this.client = client; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; this.transformsCheckpointService = transformsCheckpointService; } @@ -157,32 +133,14 @@ 
private void collectStatsForTransformsWithoutTasks(Request request, // Small assurance that we are at least below the max. Terms search has a hard limit of 10k, we should at least be below that. assert transformsWithoutTasks.size() <= Request.MAX_SIZE_RETURN; - ActionListener searchStatsListener = ActionListener.wrap( - searchResponse -> { - List nodeFailures = new ArrayList<>(response.getNodeFailures()); - if (searchResponse.getShardFailures().length > 0) { - for(ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) { - String nodeId = ""; - if (shardSearchFailure.shard() != null) { - nodeId = shardSearchFailure.shard().getNodeId(); - } - nodeFailures.add(new FailedNodeException(nodeId, shardSearchFailure.toString(), shardSearchFailure.getCause())); - } - logger.error("transform statistics document search returned shard failures: {}", - Arrays.toString(searchResponse.getShardFailures())); - } + ActionListener> searchStatsListener = ActionListener.wrap( + stats -> { List allStateAndStats = response.getTransformsStateAndStats(); - for(SearchHit hit : searchResponse.getHits().getHits()) { - BytesReference source = hit.getSourceRef(); - try { - DataFrameIndexerTransformStats stats = parseFromSource(source); - allStateAndStats.add(DataFrameTransformStateAndStats.initialStateAndStats(stats.getTransformId(), stats)); - transformsWithoutTasks.remove(stats.getTransformId()); - } catch (IOException e) { - listener.onFailure(new ElasticsearchParseException("Could not parse data frame transform stats", e)); - return; - } - } + allStateAndStats.addAll(stats); + transformsWithoutTasks.removeAll( + stats.stream().map(DataFrameTransformStateAndStats::getId).collect(Collectors.toSet())); + + // Transforms that have not been started and have no state or stats. 
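+ // Each such transform still gets an entry in the response: initialStateAndStats(transformId)
+ // supplies a default state plus zeroed DataFrameIndexerTransformStats, so callers see every
+ // id they asked for instead of a sparse result.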
transformsWithoutTasks.forEach(transformId -> allStateAndStats.add(DataFrameTransformStateAndStats.initialStateAndStats(transformId))); @@ -190,7 +148,7 @@ private void collectStatsForTransformsWithoutTasks(Request request, // it can easily become arbitrarily ordered based on which transforms don't have a task or stats docs allStateAndStats.sort(Comparator.comparing(DataFrameTransformStateAndStats::getId)); - listener.onResponse(new Response(allStateAndStats, response.getTaskFailures(), nodeFailures)); + listener.onResponse(new Response(allStateAndStats, response.getTaskFailures(), response.getNodeFailures())); }, e -> { if (e instanceof IndexNotFoundException) { @@ -201,26 +159,6 @@ private void collectStatsForTransformsWithoutTasks(Request request, } ); - QueryBuilder builder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformsWithoutTasks)) - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameIndexerTransformStats.NAME))); - - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) - .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) - .setQuery(builder) - .request(); - - ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, - searchRequest, - searchStatsListener, client::search); - } - - private static DataFrameIndexerTransformStats parseFromSource(BytesReference source) throws IOException { - try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - return DataFrameIndexerTransformStats.fromXContent(parser); - } + dataFrameTransformsConfigManager.getTransformStats(transformsWithoutTasks, searchStatsListener); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java index 9f016b58f3b5f..f8e3a3f1e852f 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java @@ -59,7 +59,7 @@ protected void doExecute(Task task, StartDataFrameTransformTaskAction.Request re protected void taskOperation(StartDataFrameTransformTaskAction.Request request, DataFrameTransformTask transformTask, ActionListener listener) { if (transformTask.getTransformId().equals(request.getId())) { - transformTask.start(listener); + transformTask.start(null, listener); } else { listener.onFailure(new RuntimeException("ID of data frame transform task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java index 17a49d8b7e834..e28f8005448d9 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java +++ 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java @@ -17,6 +17,9 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; @@ -50,7 +53,7 @@ public final class DataFrameInternalIndex { public static final String RAW = "raw"; // data types - public static final String DOUBLE = "double"; + public static final String FLOAT = "float"; public static final String LONG = "long"; public static final String KEYWORD = "keyword"; @@ -129,7 +132,7 @@ private static XContentBuilder mappings() throws IOException { // add the schema for transform configurations addDataFrameTransformsConfigMappings(builder); // add the schema for transform stats - addDataFrameTransformsStatsMappings(builder); + addDataFrameTransformStateAndStatsMappings(builder); // end type builder.endObject(); // end properties @@ -140,37 +143,76 @@ private static XContentBuilder mappings() throws IOException { } - private static XContentBuilder addDataFrameTransformsStatsMappings(XContentBuilder builder) throws IOException { + private static XContentBuilder addDataFrameTransformStateAndStatsMappings(XContentBuilder builder) throws IOException { return builder - .startObject(DataFrameIndexerTransformStats.NUM_PAGES.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_INPUT_DOCUMENTS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_OUTPUT_DOCUMENTS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_INVOCATIONS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_TOTAL.getPreferredName()) - .field(TYPE, LONG) - .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_TOTAL.getPreferredName()) - .field(TYPE, LONG) + .startObject(DataFrameTransformStateAndStats.STATE_FIELD.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataFrameTransformState.TASK_STATE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameTransformState.INDEXER_STATE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameTransformState.CURRENT_POSITION.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(DataFrameTransformState.CHECKPOINT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameTransformState.REASON.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameTransformState.PROGRESS.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataFrameTransformProgress.TOTAL_DOCS.getPreferredName()) + .field(TYPE, LONG) + 
.endObject() + .startObject(DataFrameTransformProgress.DOCS_REMAINING.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameTransformProgress.PERCENT_COMPLETE) + .field(TYPE, FLOAT) + .endObject() + .endObject() + .endObject() + .endObject() .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_FAILURES.getPreferredName()) - .field(TYPE, LONG) + .startObject(DataFrameField.STATS_FIELD.getPreferredName()) + .startObject(PROPERTIES) + .startObject(DataFrameIndexerTransformStats.NUM_PAGES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.NUM_INPUT_DOCUMENTS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.NUM_OUTPUT_DOCUMENTS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.NUM_INVOCATIONS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.INDEX_TOTAL.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.SEARCH_TOTAL.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.SEARCH_FAILURES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataFrameIndexerTransformStats.INDEX_FAILURES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .endObject() .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_FAILURES.getPreferredName()) - .field(TYPE, LONG) + .startObject(DataFrameTransformStateAndStats.CHECKPOINTING_INFO_FIELD.getPreferredName()) + .field(ENABLED, false) .endObject(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java index e8c1e012b7b30..ab893545a0d50 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -44,13 +44,14 @@ import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -274,13 +275,13 @@ public void deleteTransform(String transformId, ActionListener listener })); } - public void putOrUpdateTransformStats(DataFrameIndexerTransformStats stats, ActionListener listener) { + public void putOrUpdateTransformStats(DataFrameTransformStateAndStats stats, ActionListener listener) { try (XContentBuilder 
builder = XContentFactory.jsonBuilder()) { XContentBuilder source = stats.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.INDEX_NAME) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .id(DataFrameIndexerTransformStats.documentId(stats.getTransformId())) + .id(DataFrameTransformStateAndStats.documentId(stats.getTransformId())) .source(source); executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( @@ -297,8 +298,8 @@ public void putOrUpdateTransformStats(DataFrameIndexerTransformStats stats, Acti } } - public void getTransformStats(String transformId, ActionListener resultListener) { - GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameIndexerTransformStats.documentId(transformId)); + public void getTransformStats(String transformId, ActionListener resultListener) { + GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformStateAndStats.documentId(transformId)); executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { if (getResponse.isExists() == false) { @@ -310,7 +311,7 @@ public void getTransformStats(String transformId, ActionListener transformIds, ActionListener> listener) { + + QueryBuilder builder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformIds)) + .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformStateAndStats.NAME))); + + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) + .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) + .setQuery(builder) + .request(); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, searchRequest, + ActionListener.wrap( + searchResponse -> { + List stats = new ArrayList<>(); + for (SearchHit hit : searchResponse.getHits().getHits()) { + BytesReference source = hit.getSourceRef(); + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + stats.add(DataFrameTransformStateAndStats.fromXContent(parser)); + } catch (IOException e) { + listener.onFailure( + new ElasticsearchParseException("failed to parse data frame stats from search hit", e)); + return; + } + } + + listener.onResponse(stats); + }, + e -> { + if (e.getClass() == IndexNotFoundException.class) { + listener.onResponse(Collections.emptyList()); + } else { + listener.onFailure(e); + } + } + ), client::search); + } + private void parseTransformLenientlyFromSource(BytesReference source, String transformId, ActionListener transformListener) { try (InputStream stream = source.streamInput(); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index 5b0c0e7dfc19b..9ed8da61d8feb 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -26,10 +26,10 @@ import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -106,44 +106,47 @@ static List verifyIndicesPrimaryShardsAreActive(ClusterState clusterStat protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTransform params, PersistentTaskState state) { final String transformId = params.getId(); final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; - final DataFrameTransformState transformState = (DataFrameTransformState) state; + final DataFrameTransformState transformPTaskState = (DataFrameTransformState) state; final DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder = - new DataFrameTransformTask.ClientDataFrameIndexerBuilder() + new DataFrameTransformTask.ClientDataFrameIndexerBuilder(transformId) .setAuditor(auditor) .setClient(client) - .setIndexerState(currentIndexerState(transformState)) - .setInitialPosition(transformState == null ? null : transformState.getPosition()) - // If the state is `null` that means this is a "first run". We can safely assume the - // task will attempt to gather the initial progress information - // if we have state, this may indicate the previous execution node crashed, so we should attempt to retrieve - // the progress from state to keep an accurate measurement of our progress - .setProgress(transformState == null ? null : transformState.getProgress()) + .setIndexerState(currentIndexerState(transformPTaskState)) + // If the transform persistent task state is `null` that means this is a "first run". + // If we have state then the task has relocated from another node in which case this + // state is preferred + .setInitialPosition(transformPTaskState == null ? null : transformPTaskState.getPosition()) + .setProgress(transformPTaskState == null ? null : transformPTaskState.getProgress()) .setTransformsCheckpointService(dataFrameTransformsCheckpointService) - .setTransformsConfigManager(transformsConfigManager) - .setTransformId(transformId); + .setTransformsConfigManager(transformsConfigManager); ActionListener startTaskListener = ActionListener.wrap( response -> logger.info("Successfully completed and scheduled task in node operation"), failure -> logger.error("Failed to start task ["+ transformId +"] in node operation", failure) ); + Long previousCheckpoint = transformPTaskState != null ? 
transformPTaskState.getCheckpoint() : null; + // <3> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) // Since we don't create the task until `_start` is called, if we see that the task state is stopped, attempt to start // Schedule execution regardless - ActionListener transformStatsActionListener = ActionListener.wrap( - stats -> { - indexerBuilder.setInitialStats(stats); - buildTask.initializeIndexer(indexerBuilder); - startTask(buildTask, startTaskListener); + ActionListener transformStatsActionListener = ActionListener.wrap( + stateAndStats -> { + indexerBuilder.setInitialStats(stateAndStats.getTransformStats()); + if (transformPTaskState == null) { // prefer the persistent task state + indexerBuilder.setInitialPosition(stateAndStats.getTransformState().getPosition()); + indexerBuilder.setProgress(stateAndStats.getTransformState().getProgress()); + } + + final Long checkpoint = previousCheckpoint != null ? previousCheckpoint : stateAndStats.getTransformState().getCheckpoint(); + startTask(buildTask, indexerBuilder, checkpoint, startTaskListener); }, error -> { if (error instanceof ResourceNotFoundException == false) { logger.error("Unable to load previously persisted statistics for transform [" + params.getId() + "]", error); } - indexerBuilder.setInitialStats(new DataFrameIndexerTransformStats(transformId)); - buildTask.initializeIndexer(indexerBuilder); - startTask(buildTask, startTaskListener); + startTask(buildTask, indexerBuilder, previousCheckpoint, startTaskListener); } ); @@ -217,13 +220,17 @@ private void markAsFailed(DataFrameTransformTask task, String reason) { } private void startTask(DataFrameTransformTask buildTask, + DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder, + Long previousCheckpoint, ActionListener listener) { // If we are stopped, and it is an initial run, this means we have never been started, // attempt to start the task + + buildTask.initializeIndexer(indexerBuilder); + // TODO isInitialRun is false after relocation?? if (buildTask.getState().getTaskState().equals(DataFrameTransformTaskState.STOPPED) && buildTask.isInitialRun()) { logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); - buildTask.start(listener); - + buildTask.start(previousCheckpoint, listener); } else { logger.debug("No need to start task. 
Its current state is: {}", buildTask.getState().getIndexerState()); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index ee8767e2235df..9df6b5e3ab337 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -29,9 +29,11 @@ import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -181,7 +183,13 @@ boolean isInitialRun() { return getIndexer() != null && getIndexer().initialRun(); } - public synchronized void start(ActionListener listener) { + /** + * Start the background indexer and set the task's state to started + * @param startingCheckpoint Set the current checkpoint to this value. If null the + * current checkpoint is not set + * @param listener Started listener + */ + public synchronized void start(Long startingCheckpoint, ActionListener listener) { if (getIndexer() == null) { listener.onFailure(new ElasticsearchException("Task for transform [{}] not fully initialized. 
Try again later", getTransformId())); @@ -195,6 +203,9 @@ public synchronized void start(ActionListener listener) { } stateReason.set(null); taskState.set(DataFrameTransformTaskState.STARTED); + if (startingCheckpoint != null) { + currentCheckpoint.set(startingCheckpoint); + } final DataFrameTransformState state = new DataFrameTransformState( DataFrameTransformTaskState.STARTED, @@ -347,6 +358,11 @@ static class ClientDataFrameIndexerBuilder { private Map initialPosition; private DataFrameTransformProgress progress; + ClientDataFrameIndexerBuilder(String transformId) { + this.transformId = transformId; + this.initialStats = new DataFrameIndexerTransformStats(transformId); + } + ClientDataFrameIndexer build(DataFrameTransformTask parentTask) { return new ClientDataFrameIndexer(this.transformId, this.transformsConfigManager, @@ -538,7 +554,9 @@ protected void doSaveState(IndexerState indexerState, Map positi task -> { // Only persist the stats if something has actually changed if (previouslyPersistedStats == null || previouslyPersistedStats.equals(getStats()) == false) { - transformsConfigManager.putOrUpdateTransformStats(getStats(), + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, state, getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null ActionListener.wrap( r -> { previouslyPersistedStats = getStats(); @@ -599,7 +617,18 @@ protected void onFinish(ActionListener listener) { protected void onStop() { auditor.info(transformConfig.getId(), "Indexer has stopped"); logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); - transformTask.shutdown(); + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, transformTask.getState(), getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null + ActionListener.wrap( + r -> { + transformTask.shutdown(); + }, + statsExc -> { + transformTask.shutdown(); + logger.error("Updating saving stats of transform [" + transformConfig.getId() + "] failed", statsExc); + } + )); } @Override diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java index 36ae4f3f162a0..9c7af3efa5333 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -14,12 +14,17 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointTests; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStatsTests; import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; import org.junit.Before; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; 
@@ -217,4 +222,40 @@ public void testExpandIds() throws Exception {
         });
     }
 
+
+    public void testStateAndStats() throws InterruptedException {
+        String transformId = "transform_test_stats_create_read_update";
+
+        DataFrameTransformStateAndStats stateAndStats =
+            DataFrameTransformStateAndStatsTests.randomDataFrameTransformStateAndStats(transformId);
+
+        assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStats(stateAndStats, listener), Boolean.TRUE, null, null);
+        assertAsync(listener -> transformsConfigManager.getTransformStats(transformId, listener), stateAndStats, null, null);
+
+        DataFrameTransformStateAndStats updated =
+            DataFrameTransformStateAndStatsTests.randomDataFrameTransformStateAndStats(transformId);
+        assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStats(updated, listener), Boolean.TRUE, null, null);
+        assertAsync(listener -> transformsConfigManager.getTransformStats(transformId, listener), updated, null, null);
+    }
+
+    public void testGetStateAndStatsMultiple() throws InterruptedException {
+        int numStats = randomInt(5);
+        List<DataFrameTransformStateAndStats> expectedStats = new ArrayList<>();
+        for (int i=0; i<numStats; i++) {
+            DataFrameTransformStateAndStats stat =
+                DataFrameTransformStateAndStatsTests.randomDataFrameTransformStateAndStats(randomAlphaOfLength(6));
+            expectedStats.add(stat);
+            assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStats(stat, listener), Boolean.TRUE, null, null);
+        }
+
+        // remove one of the put stats so we don't retrieve all
+        if (expectedStats.size() > 1) {
+            expectedStats.remove(expectedStats.size() - 1);
+        }
+        List<String> ids = expectedStats.stream().map(DataFrameTransformStateAndStats::getId).collect(Collectors.toList());
+
+        // get stats will be ordered by id
+        expectedStats.sort(Comparator.comparing(DataFrameTransformStateAndStats::getId));
+        assertAsync(listener -> transformsConfigManager.getTransformStats(ids, listener), expectedStats, null, null);
+    }
 }
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml
index 8b30fd1186b5b..a475c3ceadca6 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml
@@ -114,8 +114,8 @@ teardown:
         transform_id: "airline-transform-start-stop"
   - match: { count: 1 }
   - match: { transforms.0.id: "airline-transform-start-stop" }
-  - match: { transforms.0.state.indexer_state: "stopped" }
-  - match: { transforms.0.state.task_state: "stopped" }
+#  - match: { transforms.0.state.indexer_state: "stopped" }
+#  - match: { transforms.0.state.task_state: "stopped" }
 
   - do:
       data_frame.start_data_frame_transform:
@@ -206,47 +206,3 @@ teardown:
   - do:
       data_frame.delete_data_frame_transform:
         transform_id: "airline-transform-start-later"
-
----
-"Test stop all":
-  - do:
-      data_frame.put_data_frame_transform:
-        transform_id: "airline-transform-stop-all"
-        body: >
-          {
-            "source": { "index": "airline-data" },
-            "dest": { "index": "airline-data-start-later" },
-            "pivot": {
-              "group_by": { "airline": {"terms": {"field": "airline"}}},
-              "aggs": {"avg_response": {"avg": {"field": "responsetime"}}}
-            }
-          }
-  - do:
-      data_frame.start_data_frame_transform:
-        transform_id: "airline-transform-stop-all"
-  - match: { started: true }
-
-  - do:
-      data_frame.start_data_frame_transform:
-        transform_id: "airline-transform-start-stop"
-  - match: { started: true }
-
-  - do:
-      data_frame.stop_data_frame_transform:
-        transform_id: "_all"
-        wait_for_completion: true
-
-  - match: { stopped: true }
-
-  - do:
-      data_frame.get_data_frame_transform_stats:
-        transform_id: "*"
-
-  - 
match: { count: 2 } - - match: { transforms.0.state.indexer_state: "stopped" } - - match: { transforms.0.state.task_state: "stopped" } - - match: { transforms.1.state.indexer_state: "stopped" } - - match: { transforms.1.state.task_state: "stopped" } - - - do: - data_frame.delete_data_frame_transform: - transform_id: "airline-transform-stop-all" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index bedeea18a1545..93c942f0733a8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -50,15 +50,15 @@ teardown: - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - match: { transforms.0.state.checkpoint: 0 } - - match: { transforms.0.stats.pages_processed: 0 } + - lte: { transforms.0.stats.pages_processed: 1 } - match: { transforms.0.stats.documents_processed: 0 } - match: { transforms.0.stats.documents_indexed: 0 } - match: { transforms.0.stats.trigger_count: 1 } - match: { transforms.0.stats.index_time_in_ms: 0 } - match: { transforms.0.stats.index_total: 0 } - match: { transforms.0.stats.index_failures: 0 } - - match: { transforms.0.stats.search_time_in_ms: 0 } - - match: { transforms.0.stats.search_total: 0 } + - gte: { transforms.0.stats.search_time_in_ms: 0 } + - lte: { transforms.0.stats.search_total: 1 } - match: { transforms.0.stats.search_failures: 0 } --- From d150880c7830d30a82f101822376e651e83d380c Mon Sep 17 00:00:00 2001 From: Ed Savage <32410745+edsavage@users.noreply.github.com> Date: Tue, 21 May 2019 18:41:59 +0100 Subject: [PATCH 156/321] [ML][TEST] Fix limits in AutodetectMemoryLimitIT (#42279) Re-enable muted tests and accommodate recent backend changes that result in higher memory usage being reported for a job at the start of its life-cycle --- .../xpack/ml/integration/AutodetectMemoryLimitIT.java | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index 2f00591420520..98d089e544b4e 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -84,7 +84,6 @@ public void testTooManyPartitions() throws Exception { assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207") public void testTooManyByFields() throws Exception { Detector.Builder detector = new Detector.Builder("count", null); detector.setByFieldName("user"); @@ -125,12 +124,11 @@ public void testTooManyByFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(31500000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(35000000L)); 
assertThat(modelSizeStats.getModelBytes(), greaterThan(25000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207") public void testTooManyByAndOverFields() throws Exception { Detector.Builder detector = new Detector.Builder("count", null); detector.setByFieldName("department"); @@ -175,12 +173,11 @@ public void testTooManyByAndOverFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(31500000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(33000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(24000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42207") public void testManyDistinctOverFields() throws Exception { Detector.Builder detector = new Detector.Builder("sum", "value"); detector.setOverFieldName("user"); @@ -226,7 +223,7 @@ public void testManyDistinctOverFields() throws Exception { // Assert we haven't violated the limit too much GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); - assertThat(modelSizeStats.getModelBytes(), lessThan(116000000L)); + assertThat(modelSizeStats.getModelBytes(), lessThan(117000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(90000000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } From 5f9c8ba46528032bfb0bb1058be682b88bbf4bd1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 21 May 2019 13:45:58 -0400 Subject: [PATCH 157/321] Avoid unnecessary persistence of retention leases (#42299) Today we are persisting the retention leases at least every thirty seconds by a scheduled background sync. This sync causes an fsync to disk and when there are a large number of shards allocated to slow disks, these fsyncs can pile up and can severely impact the system. This commit addresses this by only persisting and fsyncing the retention leases if they have changed since the last time that we persisted and fsynced the retention leases. 
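Viewed in isolation, the change follows a simple skip-if-unchanged pattern. The sketch below is an illustrative reduction, not the actual ReplicationTracker code in the diff that follows (which guards the check with dedicated locks and delegates the comparison to RetentionLeases#supersedes); the class and method names here are invented for the illustration, and the lease collection is reduced to a (primaryTerm, version) pair:

    import java.nio.file.Path;

    // Hypothetical sketch: persist only when the in-memory state supersedes
    // the last state that was written and fsynced to disk.
    class LeasePersistenceSketch {
        private long primaryTerm, version;                             // in-memory state
        private long persistedPrimaryTerm = -1, persistedVersion = -1; // last persisted state

        void persistRetentionLeases(Path path) {
            // Supersedes means: higher term, or equal term and higher version.
            final boolean supersedes = primaryTerm > persistedPrimaryTerm
                    || (primaryTerm == persistedPrimaryTerm && version > persistedVersion);
            if (supersedes == false) {
                return; // nothing changed since the last sync, so skip the fsync
            }
            writeAndFsync(path); // stand-in for RetentionLeases.FORMAT.writeAndCleanup(...)
            persistedPrimaryTerm = primaryTerm;
            persistedVersion = version;
        }

        private void writeAndFsync(Path path) {
            // elided: serialize the leases and fsync the state file
        }
    }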
---
 .../index/seqno/ReplicationTracker.java       | 22 +++++++++-
 .../index/seqno/RetentionLeases.java          | 22 ++++++++--
 ...ReplicationTrackerRetentionLeaseTests.java | 43 +++++++++++++++++++
 .../index/seqno/RetentionLeasesTests.java     |  4 ++
 4 files changed, 85 insertions(+), 6 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java
index 437e7934088e7..892056674019f 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java
@@ -21,7 +21,6 @@
 import com.carrotsearch.hppc.ObjectLongHashMap;
 import com.carrotsearch.hppc.ObjectLongMap;
-
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
@@ -181,6 +180,18 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
      */
     private RetentionLeases retentionLeases = RetentionLeases.EMPTY;
 
+    /**
+     * The primary term of the most-recently persisted retention leases. This is used to check if we need to persist the current retention
+     * leases.
+     */
+    private long persistedRetentionLeasesPrimaryTerm;
+
+    /**
+     * The version of the most-recently persisted retention leases. This is used to check if we need to persist the current retention
+     * leases.
+     */
+    private long persistedRetentionLeasesVersion;
+
     /**
      * Get all retention leases tracked on this shard.
      *
@@ -343,7 +354,8 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException {
     private final Object retentionLeasePersistenceLock = new Object();
 
     /**
-     * Persists the current retention leases to their dedicated state file.
+     * Persists the current retention leases to their dedicated state file. If this version of the retention leases is already persisted
+     * then persistence is skipped.
      *
      * @param path the path to the directory containing the state file
      * @throws WriteStateException if an exception occurs writing the state file
@@ -352,10 +364,16 @@ public void persistRetentionLeases(final Path path) throws WriteStateException {
         synchronized (retentionLeasePersistenceLock) {
             final RetentionLeases currentRetentionLeases;
             synchronized (this) {
+                if (retentionLeases.supersedes(persistedRetentionLeasesPrimaryTerm, persistedRetentionLeasesVersion) == false) {
+                    logger.trace("skipping persisting retention leases [{}], already persisted", retentionLeases);
+                    return;
+                }
                 currentRetentionLeases = retentionLeases;
             }
             logger.trace("persisting retention leases [{}]", currentRetentionLeases);
             RetentionLeases.FORMAT.writeAndCleanup(currentRetentionLeases, path);
+            persistedRetentionLeasesPrimaryTerm = currentRetentionLeases.primaryTerm();
+            persistedRetentionLeasesVersion = currentRetentionLeases.version();
         }
     }
 
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java
index 7c3b9e3c7b9c9..81fd7e2fce047 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java
@@ -70,13 +70,27 @@ public long version() {
 
     /**
      * Checks if this retention leases collection supersedes the specified retention leases collection.
A retention leases collection
-     * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher
+     * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher.
      *
      * @param that the retention leases collection to test against
      * @return true if this retention leases collection supersedes the specified retention lease collection, otherwise false
      */
-    public boolean supersedes(final RetentionLeases that) {
-        return primaryTerm > that.primaryTerm || primaryTerm == that.primaryTerm && version > that.version;
+    boolean supersedes(final RetentionLeases that) {
+        return supersedes(that.primaryTerm, that.version);
+    }
+
+    /**
+     * Checks if this retention leases collection would supersede a retention leases collection with the specified primary term and version.
+     * A retention leases collection supersedes another retention leases collection if its primary term is higher, or if for equal primary
+     * terms its version is higher.
+     *
+     * @param primaryTerm the primary term
+     * @param version the version
+     * @return true if this retention leases collection would supersede a retention lease collection with the specified primary term and
+     *         version
+     */
+    boolean supersedes(final long primaryTerm, final long version) {
+        return this.primaryTerm > primaryTerm || this.primaryTerm == primaryTerm && this.version > version;
+    }
 
     private final Map<String, RetentionLease> leases;
 
@@ -203,7 +217,7 @@ public static RetentionLeases fromXContent(final XContentParser parser) {
         return PARSER.apply(parser, null);
     }
 
-    static final MetaDataStateFormat<RetentionLeases> FORMAT = new MetaDataStateFormat<RetentionLeases>("retention-leases-") {
+    static final MetaDataStateFormat<RetentionLeases> FORMAT = new MetaDataStateFormat<>("retention-leases-") {
 
         @Override
         public void toXContent(final XContentBuilder builder, final RetentionLeases retentionLeases) throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
index d1bd5712dbadc..2334cb4330887 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.gateway.WriteStateException;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.shard.ShardId;
@@ -489,6 +490,48 @@ public void testLoadAndPersistRetentionLeases() throws IOException {
         assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases()));
     }
 
+    public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException {
+        final AllocationId allocationId = AllocationId.newInitializing();
+        long primaryTerm = randomLongBetween(1, Long.MAX_VALUE);
+        final ReplicationTracker replicationTracker = new ReplicationTracker(
+                new ShardId("test", "_na", 0),
+                allocationId.getId(),
+                IndexSettingsModule.newIndexSettings("test", Settings.EMPTY),
+                primaryTerm,
+                UNASSIGNED_SEQ_NO,
+                value -> {},
+                () -> 0L,
+                (leases, listener) -> {});
+        replicationTracker.updateFromMaster(
+                randomNonNegativeLong(),
+ 
Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId)); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + replicationTracker.persistRetentionLeases(path); + + final Tuple retentionLeasesWithGeneration = + RetentionLeases.FORMAT.loadLatestStateWithGeneration(logger, NamedXContentRegistry.EMPTY, path); + + replicationTracker.persistRetentionLeases(path); + final Tuple retentionLeasesWithGenerationAfterUnnecessaryPersistence = + RetentionLeases.FORMAT.loadLatestStateWithGeneration(logger, NamedXContentRegistry.EMPTY, path); + + assertThat(retentionLeasesWithGenerationAfterUnnecessaryPersistence.v1(), equalTo(retentionLeasesWithGeneration.v1())); + assertThat(retentionLeasesWithGenerationAfterUnnecessaryPersistence.v2(), equalTo(retentionLeasesWithGeneration.v2())); + } + /** * Test that we correctly synchronize writing the retention lease state file in {@link ReplicationTracker#persistRetentionLeases(Path)}. * This test can fail without the synchronization block in that method. diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java index 28444c7825e4d..c63b2ebb6645b 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java @@ -60,7 +60,9 @@ public void testSupersedesByPrimaryTerm() { final long higherPrimaryTerm = randomLongBetween(lowerPrimaryTerm + 1, Long.MAX_VALUE); final RetentionLeases right = new RetentionLeases(higherPrimaryTerm, randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); assertTrue(right.supersedes(left)); + assertTrue(right.supersedes(left.primaryTerm(), left.version())); assertFalse(left.supersedes(right)); + assertFalse(left.supersedes(right.primaryTerm(), right.version())); } public void testSupersedesByVersion() { @@ -70,7 +72,9 @@ public void testSupersedesByVersion() { final RetentionLeases left = new RetentionLeases(primaryTerm, lowerVersion, Collections.emptyList()); final RetentionLeases right = new RetentionLeases(primaryTerm, higherVersion, Collections.emptyList()); assertTrue(right.supersedes(left)); + assertTrue(right.supersedes(left.primaryTerm(), left.version())); assertFalse(left.supersedes(right)); + assertFalse(left.supersedes(right.primaryTerm(), right.version())); } public void testRetentionLeasesRejectsDuplicates() { From c59fbb3358d003640e80ba9bda52fca91063e4f2 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 21 May 2019 13:47:47 -0400 Subject: [PATCH 158/321] Reorganize Painless doc structure (#42303) --- docs/painless/index.asciidoc | 2 +- docs/painless/painless-contexts.asciidoc | 2 - .../painless/painless-contexts/index.asciidoc | 2 + ...ption.asciidoc => painless-guide.asciidoc} | 19 +++++--- docs/painless/painless-guide/index.asciidoc | 7 +++ .../painless-debugging.asciidoc | 0 
.../painless-execute-script.asciidoc | 0 .../painless-method-dispatch.asciidoc | 30 ++++++++++++ .../painless-walkthrough.asciidoc} | 48 ++----------------- docs/painless/painless-lang-spec.asciidoc | 36 +------------- .../painless-lang-spec/index.asciidoc | 35 ++++++++++++++ .../painless-casting.asciidoc | 0 .../painless-comments.asciidoc | 0 .../painless-functions.asciidoc | 0 .../painless-identifiers.asciidoc | 0 .../painless-keywords.asciidoc | 0 .../painless-lambdas.asciidoc | 0 .../painless-literals.asciidoc | 0 .../painless-operators-array.asciidoc | 0 .../painless-operators-boolean.asciidoc | 0 .../painless-operators-general.asciidoc | 0 .../painless-operators-numeric.asciidoc | 0 .../painless-operators-reference.asciidoc | 0 .../painless-operators.asciidoc | 0 .../painless-regexes.asciidoc | 0 .../painless-scripts.asciidoc | 0 .../painless-statements.asciidoc | 0 .../painless-types.asciidoc | 0 .../painless-variables.asciidoc | 0 docs/painless/painless-xref.asciidoc | 2 - docs/reference/ingest/ingest-node.asciidoc | 2 +- .../modules/scripting/painless.asciidoc | 29 ++++++++++- 32 files changed, 120 insertions(+), 94 deletions(-) rename docs/painless/{painless-description.asciidoc => painless-guide.asciidoc} (56%) create mode 100644 docs/painless/painless-guide/index.asciidoc rename docs/painless/{ => painless-guide}/painless-debugging.asciidoc (100%) rename docs/painless/{ => painless-guide}/painless-execute-script.asciidoc (100%) create mode 100644 docs/painless/painless-guide/painless-method-dispatch.asciidoc rename docs/painless/{painless-getting-started.asciidoc => painless-guide/painless-walkthrough.asciidoc} (83%) create mode 100644 docs/painless/painless-lang-spec/index.asciidoc rename docs/painless/{ => painless-lang-spec}/painless-casting.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-comments.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-functions.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-identifiers.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-keywords.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-lambdas.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-literals.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-array.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-boolean.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-general.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-numeric.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators-reference.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-operators.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-regexes.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-scripts.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-statements.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-types.asciidoc (100%) rename docs/painless/{ => painless-lang-spec}/painless-variables.asciidoc (100%) delete mode 100644 docs/painless/painless-xref.asciidoc diff --git a/docs/painless/index.asciidoc b/docs/painless/index.asciidoc index 92e0a33bf1347..c41899bbd98da 100644 --- a/docs/painless/index.asciidoc +++ b/docs/painless/index.asciidoc @@ -3,7 +3,7 @@ include::../Versions.asciidoc[] -include::painless-getting-started.asciidoc[] 
+include::painless-guide.asciidoc[]
 
 include::painless-lang-spec.asciidoc[]
 
diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc
index 7c342a3da7a5a..ccc9e3ac4db24 100644
--- a/docs/painless/painless-contexts.asciidoc
+++ b/docs/painless/painless-contexts.asciidoc
@@ -54,6 +54,4 @@ specialized code may define new ways to use a Painless script.
 | {xpack-ref}/transform-script.html[Elasticsearch Documentation]
 |====
 
-include::painless-contexts/painless-context-examples.asciidoc[]
-
 include::painless-contexts/index.asciidoc[]
diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc
index 0c8c21c06a9be..11b4c9993374e 100644
--- a/docs/painless/painless-contexts/index.asciidoc
+++ b/docs/painless/painless-contexts/index.asciidoc
@@ -1,3 +1,5 @@
+include::painless-context-examples.asciidoc[]
+
 include::painless-ingest-processor-context.asciidoc[]
 
 include::painless-update-context.asciidoc[]
diff --git a/docs/painless/painless-description.asciidoc b/docs/painless/painless-guide.asciidoc
similarity index 56%
rename from docs/painless/painless-description.asciidoc
rename to docs/painless/painless-guide.asciidoc
index dfaf66ca26d4b..5e926498088ab 100644
--- a/docs/painless/painless-description.asciidoc
+++ b/docs/painless/painless-guide.asciidoc
@@ -1,11 +1,14 @@
+[[painless-guide]]
+== Painless Guide
+
 _Painless_ is a simple, secure scripting language designed specifically for use
 with Elasticsearch. It is the default scripting language for Elasticsearch and
-can safely be used for inline and stored scripts. For a detailed description of
-the Painless syntax and language features, see the
-{painless}/painless-lang-spec.html[Painless Language Specification].
+can safely be used for inline and stored scripts. For a jump start into
+Painless, see <<painless-walkthrough, A Brief Painless Walkthrough>>. For a
+detailed description of the Painless syntax and language features, see the
+<<painless-lang-spec, Painless Language Specification>>.
 
-[[painless-features]]
-You can use Painless anywhere scripts can be used in Elasticsearch. Painless
+You can use Painless anywhere scripts are used in Elasticsearch. Painless
 provides:
 
 * Fast performance: Painless scripts https://benchmarks.elastic.co/index.html#search_qps_scripts[
 run several times faster] than the alternatives.
@@ -18,7 +21,9 @@ complete list of available classes and methods.
 * Optional typing: Variables and parameters can use explicit types or the
 dynamic `def` type.
 
-* Syntax: Extends Java's syntax to provide http://groovy-lang.org/index.html[
-Groovy-style] scripting language features that make scripts easier to write.
+* Syntax: Extends a subset of Java's syntax to provide additional scripting
+language features.
 
 * Optimizations: Designed specifically for Elasticsearch scripting.
+
+include::painless-guide/index.asciidoc[]
\ No newline at end of file
diff --git a/docs/painless/painless-guide/index.asciidoc b/docs/painless/painless-guide/index.asciidoc
new file mode 100644
index 0000000000000..b45406a4e7273
--- /dev/null
+++ b/docs/painless/painless-guide/index.asciidoc
@@ -0,0 +1,7 @@
+include::painless-walkthrough.asciidoc[]
+
+include::painless-method-dispatch.asciidoc[]
+
+include::painless-debugging.asciidoc[]
+
+include::painless-execute-script.asciidoc[]
diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-guide/painless-debugging.asciidoc
similarity index 100%
rename from docs/painless/painless-debugging.asciidoc
rename to docs/painless/painless-guide/painless-debugging.asciidoc
diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-guide/painless-execute-script.asciidoc
similarity index 100%
rename from docs/painless/painless-execute-script.asciidoc
rename to docs/painless/painless-guide/painless-execute-script.asciidoc
diff --git a/docs/painless/painless-guide/painless-method-dispatch.asciidoc b/docs/painless/painless-guide/painless-method-dispatch.asciidoc
new file mode 100644
index 0000000000000..0f7d0423174b5
--- /dev/null
+++ b/docs/painless/painless-guide/painless-method-dispatch.asciidoc
@@ -0,0 +1,30 @@
+[[modules-scripting-painless-dispatch]]
+=== How painless dispatches functions
+
+Painless uses receiver, name, and https://en.wikipedia.org/wiki/Arity[arity]
+for method dispatch. For example, `s.foo(a, b)` is resolved by first getting
+the class of `s` and then looking up the method `foo` with two parameters. This
+is different from Groovy which uses the
+https://en.wikipedia.org/wiki/Multiple_dispatch[runtime types] of the
+parameters and Java which uses the compile time types of the parameters.
+
+The consequence of this is that Painless doesn't support overloaded methods like
+Java, leading to some trouble when it whitelists classes from the Java
+standard library. For example, in Java and Groovy, `Matcher` has two methods:
+`group(int)` and `group(String)`. Painless can't whitelist both of these methods
+because they have the same name and the same number of parameters. So instead it
+has `group(int)` and `namedGroup(String)`.
+
+We have a few justifications for this different way of dispatching methods:
+
+1. It makes operating on `def` types simpler and, presumably, faster. Using
+receiver, name, and arity means that when Painless sees a call on a `def` object it
+can dispatch the appropriate method without having to do expensive comparisons
+of the types of the parameters. The same is true for invocations with `def`
+typed parameters.
+2. It keeps things consistent. It would be genuinely weird for Painless to
+behave like Groovy if any `def` typed parameters were involved and Java
+otherwise. It'd be slow for it to behave like Groovy all the time.
+3. It keeps Painless maintainable. Adding the Java or Groovy like method
+dispatch *feels* like it'd add a ton of complexity which'd make maintenance and
+other improvements much more difficult.
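To make the arity-based dispatch concrete, the following is a brief illustrative fragment; the regex, input string, and variable names are invented for this note, while `namedGroup(String)` is the whitelisted Painless augmentation the text describes (regex literals additionally require `script.painless.regex.enabled: true`):

[source,painless]
----
Matcher m = /(\d+)-(?<env>\w+)/.matcher("1234-prod");
if (m.find()) {
    String number = m.group(1);       // resolved by receiver, name, and arity: group(int)
    String env = m.namedGroup("env"); // Painless's replacement for Java's group(String)
}
----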
diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-guide/painless-walkthrough.asciidoc similarity index 83% rename from docs/painless/painless-getting-started.asciidoc rename to docs/painless/painless-guide/painless-walkthrough.asciidoc index f562033471e31..70089a08726d2 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-guide/painless-walkthrough.asciidoc @@ -1,10 +1,5 @@ -[[painless-getting-started]] -== Getting Started with Painless - -include::painless-description.asciidoc[] - -[[painless-examples]] -=== Painless Examples +[[painless-walkthrough]] +=== A Brief Painless Walkthrough To illustrate how Painless works, let's load some hockey stats into an Elasticsearch index: @@ -121,7 +116,7 @@ GET hockey/_search [float] -===== Missing values +==== Missing values `doc['field'].value` throws an exception if the field is missing in a document. @@ -198,7 +193,7 @@ POST hockey/_update/1 ==== Dates Date fields are exposed as -`ReadableDateTime`, so they support methods like `getYear`, `getDayOfWeek` +`ZonedDateTime`, so they support methods like `getYear`, `getDayOfWeek` or e.g. getting milliseconds since epoch with `getMillis`. To use these in a script, leave out the `get` prefix and continue with lowercasing the rest of the method name. For example, the following returns every hockey @@ -365,38 +360,3 @@ Note: all of the `_update_by_query` examples above could really do with a {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient as using any other query because script queries aren't able to use the inverted index to limit the documents that they have to check. - -[[modules-scripting-painless-dispatch]] -=== How painless dispatches functions - -Painless uses receiver, name, and https://en.wikipedia.org/wiki/Arity[arity] -for method dispatch. For example, `s.foo(a, b)` is resolved by first getting -the class of `s` and then looking up the method `foo` with two parameters. This -is different from Groovy which uses the -https://en.wikipedia.org/wiki/Multiple_dispatch[runtime types] of the -parameters and Java which uses the compile time types of the parameters. - -The consequence of this that Painless doesn't support overloaded methods like -Java, leading to some trouble when it whitelists classes from the Java -standard library. For example, in Java and Groovy, `Matcher` has two methods: -`group(int)` and `group(String)`. Painless can't whitelist both of these methods -because they have the same name and the same number of parameters. So instead it -has `group(int)` and `namedGroup(String)`. - -We have a few justifications for this different way of dispatching methods: - -1. It makes operating on `def` types simpler and, presumably, faster. Using -receiver, name, and arity means that when Painless sees a call on a `def` object it -can dispatch the appropriate method without having to do expensive comparisons -of the types of the parameters. The same is true for invocations with `def` -typed parameters. -2. It keeps things consistent. It would be genuinely weird for Painless to -behave like Groovy if any `def` typed parameters were involved and Java -otherwise. It'd be slow for it to behave like Groovy all the time. -3. It keeps Painless maintainable. Adding the Java or Groovy like method -dispatch *feels* like it'd add a ton of complexity which'd make maintenance and -other improvements much more difficult. 
- -include::painless-debugging.asciidoc[] - -include::painless-execute-script.asciidoc[] diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index d50f3db2dc0d3..2f108c73732eb 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -17,38 +17,4 @@ into Java Virtual Machine (JVM) byte code and executed against a standard JVM. This specification uses ANTLR4 grammar notation to describe the allowed syntax. However, the actual Painless grammar is more compact than what is shown here. -include::painless-comments.asciidoc[] - -include::painless-keywords.asciidoc[] - -include::painless-literals.asciidoc[] - -include::painless-identifiers.asciidoc[] - -include::painless-variables.asciidoc[] - -include::painless-types.asciidoc[] - -include::painless-casting.asciidoc[] - -include::painless-operators.asciidoc[] - -include::painless-operators-general.asciidoc[] - -include::painless-operators-numeric.asciidoc[] - -include::painless-operators-boolean.asciidoc[] - -include::painless-operators-reference.asciidoc[] - -include::painless-operators-array.asciidoc[] - -include::painless-statements.asciidoc[] - -include::painless-scripts.asciidoc[] - -include::painless-functions.asciidoc[] - -include::painless-lambdas.asciidoc[] - -include::painless-regexes.asciidoc[] +include::painless-lang-spec/index.asciidoc[] \ No newline at end of file diff --git a/docs/painless/painless-lang-spec/index.asciidoc b/docs/painless/painless-lang-spec/index.asciidoc new file mode 100644 index 0000000000000..e75264ff3e4e1 --- /dev/null +++ b/docs/painless/painless-lang-spec/index.asciidoc @@ -0,0 +1,35 @@ +include::painless-comments.asciidoc[] + +include::painless-keywords.asciidoc[] + +include::painless-literals.asciidoc[] + +include::painless-identifiers.asciidoc[] + +include::painless-variables.asciidoc[] + +include::painless-types.asciidoc[] + +include::painless-casting.asciidoc[] + +include::painless-operators.asciidoc[] + +include::painless-operators-general.asciidoc[] + +include::painless-operators-numeric.asciidoc[] + +include::painless-operators-boolean.asciidoc[] + +include::painless-operators-reference.asciidoc[] + +include::painless-operators-array.asciidoc[] + +include::painless-statements.asciidoc[] + +include::painless-scripts.asciidoc[] + +include::painless-functions.asciidoc[] + +include::painless-lambdas.asciidoc[] + +include::painless-regexes.asciidoc[] diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-lang-spec/painless-casting.asciidoc similarity index 100% rename from docs/painless/painless-casting.asciidoc rename to docs/painless/painless-lang-spec/painless-casting.asciidoc diff --git a/docs/painless/painless-comments.asciidoc b/docs/painless/painless-lang-spec/painless-comments.asciidoc similarity index 100% rename from docs/painless/painless-comments.asciidoc rename to docs/painless/painless-lang-spec/painless-comments.asciidoc diff --git a/docs/painless/painless-functions.asciidoc b/docs/painless/painless-lang-spec/painless-functions.asciidoc similarity index 100% rename from docs/painless/painless-functions.asciidoc rename to docs/painless/painless-lang-spec/painless-functions.asciidoc diff --git a/docs/painless/painless-identifiers.asciidoc b/docs/painless/painless-lang-spec/painless-identifiers.asciidoc similarity index 100% rename from docs/painless/painless-identifiers.asciidoc rename to docs/painless/painless-lang-spec/painless-identifiers.asciidoc diff --git 
a/docs/painless/painless-keywords.asciidoc b/docs/painless/painless-lang-spec/painless-keywords.asciidoc similarity index 100% rename from docs/painless/painless-keywords.asciidoc rename to docs/painless/painless-lang-spec/painless-keywords.asciidoc diff --git a/docs/painless/painless-lambdas.asciidoc b/docs/painless/painless-lang-spec/painless-lambdas.asciidoc similarity index 100% rename from docs/painless/painless-lambdas.asciidoc rename to docs/painless/painless-lang-spec/painless-lambdas.asciidoc diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-lang-spec/painless-literals.asciidoc similarity index 100% rename from docs/painless/painless-literals.asciidoc rename to docs/painless/painless-lang-spec/painless-literals.asciidoc diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-lang-spec/painless-operators-array.asciidoc similarity index 100% rename from docs/painless/painless-operators-array.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-array.asciidoc diff --git a/docs/painless/painless-operators-boolean.asciidoc b/docs/painless/painless-lang-spec/painless-operators-boolean.asciidoc similarity index 100% rename from docs/painless/painless-operators-boolean.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-boolean.asciidoc diff --git a/docs/painless/painless-operators-general.asciidoc b/docs/painless/painless-lang-spec/painless-operators-general.asciidoc similarity index 100% rename from docs/painless/painless-operators-general.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-general.asciidoc diff --git a/docs/painless/painless-operators-numeric.asciidoc b/docs/painless/painless-lang-spec/painless-operators-numeric.asciidoc similarity index 100% rename from docs/painless/painless-operators-numeric.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-numeric.asciidoc diff --git a/docs/painless/painless-operators-reference.asciidoc b/docs/painless/painless-lang-spec/painless-operators-reference.asciidoc similarity index 100% rename from docs/painless/painless-operators-reference.asciidoc rename to docs/painless/painless-lang-spec/painless-operators-reference.asciidoc diff --git a/docs/painless/painless-operators.asciidoc b/docs/painless/painless-lang-spec/painless-operators.asciidoc similarity index 100% rename from docs/painless/painless-operators.asciidoc rename to docs/painless/painless-lang-spec/painless-operators.asciidoc diff --git a/docs/painless/painless-regexes.asciidoc b/docs/painless/painless-lang-spec/painless-regexes.asciidoc similarity index 100% rename from docs/painless/painless-regexes.asciidoc rename to docs/painless/painless-lang-spec/painless-regexes.asciidoc diff --git a/docs/painless/painless-scripts.asciidoc b/docs/painless/painless-lang-spec/painless-scripts.asciidoc similarity index 100% rename from docs/painless/painless-scripts.asciidoc rename to docs/painless/painless-lang-spec/painless-scripts.asciidoc diff --git a/docs/painless/painless-statements.asciidoc b/docs/painless/painless-lang-spec/painless-statements.asciidoc similarity index 100% rename from docs/painless/painless-statements.asciidoc rename to docs/painless/painless-lang-spec/painless-statements.asciidoc diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-lang-spec/painless-types.asciidoc similarity index 100% rename from docs/painless/painless-types.asciidoc rename to docs/painless/painless-lang-spec/painless-types.asciidoc diff 
--git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-lang-spec/painless-variables.asciidoc similarity index 100% rename from docs/painless/painless-variables.asciidoc rename to docs/painless/painless-lang-spec/painless-variables.asciidoc diff --git a/docs/painless/painless-xref.asciidoc b/docs/painless/painless-xref.asciidoc deleted file mode 100644 index 86407b3e697d6..0000000000000 --- a/docs/painless/painless-xref.asciidoc +++ /dev/null @@ -1,2 +0,0 @@ -Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the -{painless}/painless.html[Painless Scripting Language]. \ No newline at end of file diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 1f8abc5675db9..b1a92222bec59 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -563,7 +563,7 @@ template for all indexes that hold data that needs pre-index processing. [[conditionals-with-regex]] === Conditionals with the Regular Expressions The `if` conditional is implemented as a Painless script, which requires -{painless}//painless-examples.html#modules-scripting-painless-regex[explicit support for regular expressions]. +{painless}//painless-regexes.html[explicit support for regular expressions]. `script.painless.regex.enabled: true` must be set in `elasticsearch.yml` to use regular expressions in the `if` condition. diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc index ac48aad73d28f..6dd9b50db51ed 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/modules/scripting/painless.asciidoc @@ -1,7 +1,32 @@ [[modules-scripting-painless]] === Painless Scripting Language -include::../../../painless/painless-description.asciidoc[] +_Painless_ is a simple, secure scripting language designed specifically for use +with Elasticsearch. It is the default scripting language for Elasticsearch and +can safely be used for inline and stored scripts. To get started with +Painless, see the {painless}/painless-guide.html[Painless Guide]. For a +detailed description of the Painless syntax and language features, see the +{painless}/painless-lang-spec.html[Painless Language Specification]. -Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the +[[painless-features]] +You can use Painless anywhere scripts can be used in Elasticsearch. Painless +provides: + +* Fast performance: Painless scripts https://benchmarks.elastic.co/index.html#search_qps_scripts[ +run several times faster] than the alternatives. + +* Safety: Fine-grained whitelist with method call/field granularity. See the +{painless}/painless-api-reference.html[Painless API Reference] for a +complete list of available classes and methods. + +* Optional typing: Variables and parameters can use explicit types or the +dynamic `def` type. + +* Syntax: Extends a subset of Java's syntax to provide additional scripting +language features. + +* Optimizations: Designed specifically for Elasticsearch scripting. + +Ready to start scripting with Painless? See the +{painless}/painless-guide.html[Painless Guide] for the {painless}/index.html[Painless Scripting Language]. 
\ No newline at end of file From be412ca83f0d4644712a7baa8a6330b1e72b2858 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 21 May 2019 19:49:53 +0200 Subject: [PATCH 159/321] Remove Dead Code from Azure Repo Plugin (#42178) * None of this stuff is used --- .../repositories/azure/AzureBlobStore.java | 7 +----- .../repositories/azure/AzureRepository.java | 11 ++------- .../azure/AzureStorageService.java | 24 +++---------------- .../azure/AzureStorageSettings.java | 12 ++-------- .../repositories/azure/SocketAccess.java | 2 +- .../azure/AzureBlobStoreContainerTests.java | 15 ++++-------- .../azure/AzureBlobStoreTests.java | 16 ++++--------- .../azure/AzureStorageServiceMock.java | 14 ++--------- 8 files changed, 19 insertions(+), 82 deletions(-) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 697125fbd537d..7eeadc7f6475b 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -45,8 +45,7 @@ public class AzureBlobStore implements BlobStore { private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) - throws URISyntaxException, StorageException { + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; @@ -69,10 +68,6 @@ public LocationMode getLocationMode() { return locationMode; } - public String getClientName() { - return clientName; - } - @Override public BlobContainer blobContainer(BlobPath path) { return new AzureBlobContainer(path, this); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 33ee9b64c2683..7c3520918fc58 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -112,20 +112,16 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam } } - // only use for testing @Override protected BlobStore getBlobStore() { return super.getBlobStore(); } - /** - * {@inheritDoc} - */ @Override - protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { + protected AzureBlobStore createBlobStore() { final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, isCompress(), basePath)); return blobStore; @@ -136,9 +132,6 @@ protected BlobPath basePath() { return basePath; } - /** - * {@inheritDoc} - */ @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java 
b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d18592b8a7bb..89a78fd8045ee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -97,7 +97,7 @@ public Tuple> client(String clientNa } } - protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final CloudBlobClient client = createClient(azureStorageSettings); // Set timeout option if the user sets cloud.azure.storage.timeout or // cloud.azure.storage.xxx.timeout (it's negative by default) @@ -115,12 +115,12 @@ protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) return client; } - protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final String connectionString = azureStorageSettings.buildConnectionString(); return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); } - protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { + private static OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { final OperationContext context = new OperationContext(); context.setProxy(azureStorageSettings.getProxy()); return context; @@ -146,24 +146,6 @@ public boolean doesContainerExist(String account, String container) throws URISy return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); } - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - // container name must be lower case. 
- logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); - SocketAccess.doPrivilegedVoidException(() -> { - // list the blobs using a flat blob listing mode - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - client.v2().get())) { - final String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); - // don't call {@code #deleteBlob}, use the same client - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - } - }); - } - /** * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile * It should remove the container part (first part of the path) and gives path/to/myfile diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 3f7a5df8f14b2..e57d855cb0ee5 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -129,14 +129,6 @@ private AzureStorageSettings(String account, String key, String endpointSuffix, this.locationMode = LocationMode.PRIMARY_ONLY; } - public String getKey() { - return key; - } - - public String getAccount() { - return account; - } - public String getEndpointSuffix() { return endpointSuffix; } @@ -207,7 +199,7 @@ public static Map load(Settings settings) { // pkg private for tests /** Parse settings for a single client. 
*/ - static AzureStorageSettings getClientSettings(Settings settings, String clientName) { + private static AzureStorageSettings getClientSettings(Settings settings, String clientName) { try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING); SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) { return new AzureStorageSettings(account.toString(), key.toString(), @@ -226,7 +218,7 @@ private static T getConfigValue(Settings settings, String clientName, return concreteSetting.get(settings); } - public static T getValue(Settings settings, String groupName, Setting setting) { + private static T getValue(Settings settings, String groupName, Setting setting) { final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index da8b85430067c..1400cc5b06627 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -48,7 +48,7 @@ public static T doPrivilegedIOException(PrivilegedExceptionAction operati } } - public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException, URISyntaxException { + public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index a06dd7c3f28b1..13cc487a1c122 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -19,24 +19,17 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; -import java.io.IOException; -import java.net.URISyntaxException; public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index 9a0c9039d089c..67d30fda05b69 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -18,25 +18,17 @@ */ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; -import java.io.IOException; -import java.net.URISyntaxException; - public class AzureBlobStoreTests extends ESBlobStoreTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 5f3072e1ad9ed..17502a1d1f982 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -33,7 +33,6 @@ import java.io.IOException; import java.io.InputStream; import java.net.SocketPermission; -import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.security.AccessController; @@ -61,21 +60,13 @@ public boolean doesContainerExist(String account, String container) { return true; } - @Override - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Map blobs = listBlobsByPrefix(account, container, path, null); - for (String key : blobs.keySet()) { - deleteBlob(account, container, key); - } - } - @Override public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); } @Override - public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { + public void deleteBlob(String account, String container, String blob) throws StorageException { if (blobs.remove(blob) == null) { throw new StorageException("BlobNotFound", "[" + blob + "] does not exist.", 404, null, null); } @@ -109,8 +100,7 @@ public Map listBlobsByPrefix(String account, String contai @Override public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, - boolean failIfAlreadyExists) - throws URISyntaxException, StorageException, FileAlreadyExistsException { + boolean failIfAlreadyExists) throws StorageException, FileAlreadyExistsException { if (failIfAlreadyExists && 
blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); } From 4766ffa032ef0036e77c9d57165d36d8e0de9258 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 21 May 2019 20:37:45 +0200 Subject: [PATCH 160/321] Make unwrapCorrupt Check Suppressed Ex. (#41889) * Make unwrapCorrupt Check Suppressed Ex. * As discussed in #24800, we want to check for suppressed corruption-indicating exceptions here as well, to more reliably categorize corruption-related exceptions * Closes #24800, #41201 --- .../org/elasticsearch/ExceptionsHelper.java | 36 +++++++++++++++++-- .../elasticsearch/ExceptionsHelperTests.java | 28 +++++++++++++++ .../recovery/RecoverySourceHandlerTests.java | 4 ++- 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e4269a375dd6c..48461ffe30d4b 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -175,12 +175,42 @@ public static <T extends Throwable> T useOrSuppress(T first, T second) { return first; } + private static final List<Class<? extends IOException>> CORRUPTION_EXCEPTIONS = + List.of(CorruptIndexException.class, IndexFormatTooOldException.class, IndexFormatTooNewException.class); + + /** + * Looks at the given Throwable and its cause(s), as well as any suppressed exceptions on the Throwable and its causes, + * and returns the first corruption-indicating exception (as defined by {@link #CORRUPTION_EXCEPTIONS}) it finds. + * @param t Throwable + * @return Corruption-indicating exception if one is found, otherwise {@code null} + */ public static IOException unwrapCorruption(Throwable t) { - return (IOException) unwrap(t, CorruptIndexException.class, - IndexFormatTooOldException.class, - IndexFormatTooNewException.class); + if (t != null) { + do { + for (Class<?> clazz : CORRUPTION_EXCEPTIONS) { + if (clazz.isInstance(t)) { + return (IOException) t; + } + } + for (Throwable suppressed : t.getSuppressed()) { + IOException corruptionException = unwrapCorruption(suppressed); + if (corruptionException != null) { + return corruptionException; + } + } + } while ((t = t.getCause()) != null); + } + return null; } + /** + * Looks at the given Throwable and its cause(s) and returns the first Throwable that is of one of the given classes, or {@code null} + * if no matching Throwable is found. Unlike {@link #unwrapCorruption}, this method only checks the given Throwable and its causes + * and does not look at any suppressed exceptions. + * @param t Throwable + * @param clazzes Classes to look for + * @return Matching Throwable if one is found, otherwise {@code null} + */ public static Throwable unwrap(Throwable t, Class<?>...
clazzes) { if (t != null) { do { diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 1d2a4ca6d5f75..2de2f259e6ff1 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch; import org.apache.commons.codec.DecoderException; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -183,4 +184,31 @@ public void testGroupByNullIndex() { ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures); assertThat(groupBy.length, equalTo(2)); } + + public void testUnwrapCorruption() { + final Throwable corruptIndexException = new CorruptIndexException("corrupt", "resource"); + assertThat(ExceptionsHelper.unwrapCorruption(corruptIndexException), equalTo(corruptIndexException)); + + final Throwable corruptionAsCause = new RuntimeException(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionAsCause), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressed = new RuntimeException(); + corruptionSuppressed.addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressed), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressedOnCause = new RuntimeException(new RuntimeException()); + corruptionSuppressedOnCause.getCause().addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressedOnCause), equalTo(corruptIndexException)); + + final Throwable corruptionCauseOnSuppressed = new RuntimeException(); + corruptionCauseOnSuppressed.addSuppressed(new RuntimeException(corruptIndexException)); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionCauseOnSuppressed), equalTo(corruptIndexException)); + + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException()), nullValue()); + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException(new RuntimeException())), nullValue()); + + final Throwable withSuppressedException = new RuntimeException(); + withSuppressedException.addSuppressed(new RuntimeException()); + assertThat(ExceptionsHelper.unwrapCorruption(withSuppressedException), nullValue()); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b63c7a2e0e8f6..b49bef57aceb1 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -438,10 +438,12 @@ protected void failEngine(IOException cause) { handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); fail("exception index"); } catch (RuntimeException ex) { - assertNull(ExceptionsHelper.unwrapCorruption(ex)); + final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); if (throwCorruptedIndexException) { + assertNotNull(unwrappedCorruption); assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); } else { + assertNull(unwrappedCorruption); assertEquals(ex.getMessage(), "boom"); } } catch (CorruptIndexException ex) { From 
07ab45a426912a78d3951e29169a47885c1f3246 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 15:07:51 -0400 Subject: [PATCH 161/321] Mute transforms_stats yaml test AwaitsFix https://github.com/elastic/elasticsearch/issues/42309 --- .../rest-api-spec/test/data_frame/transforms_stats.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 93c942f0733a8..61117b138bff7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -42,6 +42,9 @@ teardown: --- "Test get transform stats": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-stats" From f5e54b495dba85b89b25387a673336ef4a73b653 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 21 May 2019 12:48:13 -0700 Subject: [PATCH 162/321] Deprecate support for chained multi-fields. (#41926) We now issue a deprecation warning if a multi-field definition contains a `[fields]` entry. This PR also simplifies the definition of `MultiFieldParserContext`. Addresses #41267. --- .../elasticsearch/index/mapper/Mapper.java | 8 ++-- .../index/mapper/TypeParsers.java | 14 ++++++- .../mapper/ExternalFieldMapperTests.java | 12 ++++++ .../index/mapper/TypeParsersTests.java | 37 +++++++++++++++++++ 4 files changed, 65 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index d98630e5f765e..5de5394a94abe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -136,10 +136,7 @@ public Supplier queryShardContextSupplier() { protected Function similarityLookupService() { return similarityLookupService; } public ParserContext createMultiFieldContext(ParserContext in) { - return new MultiFieldParserContext(in) { - @Override - public boolean isWithinMultiField() { return true; } - }; + return new MultiFieldParserContext(in); } static class MultiFieldParserContext extends ParserContext { @@ -147,6 +144,9 @@ static class MultiFieldParserContext extends ParserContext { super(in.type(), in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.queryShardContextSupplier()); } + + @Override + public boolean isWithinMultiField() { return true; } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 77d7be62fc1b9..9848a23cac11b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -19,8 +19,10 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.AnalysisMode; @@ -37,6 +39,7 @@ import static 
org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; public class TypeParsers { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TypeParsers.class)); public static final String DOC_VALUES = "doc_values"; public static final String INDEX_OPTIONS_DOCS = "docs"; @@ -214,11 +217,18 @@ public static void parseField(FieldMapper.Builder builder, String name, Map multiFieldsPropNodes; + parserContext = parserContext.createMultiFieldContext(parserContext); + final Map multiFieldsPropNodes; if (propNode instanceof List && ((List) propNode).isEmpty()) { multiFieldsPropNodes = Collections.emptyMap(); } else if (propNode instanceof Map) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index d80776007aba8..e5d3040f7a3bc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -169,6 +169,12 @@ public void testExternalValuesWithMultifield() throws Exception { assertThat(raw, notNullValue()); assertThat(raw.binaryValue(), is(new BytesRef("foo"))); + + assertWarnings("At least one multi-field, [field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { @@ -234,5 +240,11 @@ public void testExternalValuesWithMultifieldTwoLevels() throws Exception { assertThat(doc.rootDoc().getField("field.raw"), notNullValue()); assertThat(doc.rootDoc().getField("field.raw").stringValue(), is("foo")); + + assertWarnings("At least one multi-field, [field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. 
To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index bc59c59aa54ab..70f469b96370c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -24,7 +24,11 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisMode; @@ -36,6 +40,7 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -157,6 +162,38 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext); } + public void testMultiFieldWithinMultiField() throws IOException { + TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField"); + + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject() + .field("type", "keyword") + .startObject("fields") + .startObject("sub-field") + .field("type", "keyword") + .startObject("fields") + .startObject("sub-sub-field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + Map fieldNode = XContentHelper.convertToMap( + BytesReference.bytes(mapping), true, mapping.contentType()).v2(); + + Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); + Mapper.TypeParser.ParserContext parserContext = new Mapper.TypeParser.ParserContext("type", + null, null, type -> typeParser, Version.CURRENT, null); + + TypeParsers.parseField(builder, "some-field", fieldNode, parserContext); + assertWarnings("At least one multi-field, [sub-field], was " + + "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + + "no longer be supported in 8.0. 
To resolve the issue, all instances of [fields] that occur within a [fields] block " + + "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + + "switching to [copy_to] if appropriate."); + } + private Analyzer createAnalyzerWithMode(String name, AnalysisMode mode) { TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(indexSettings, name, Settings.EMPTY) { @Override From 1dcaf4f1f8d98a6cd20a531a56ed9dd3a4588747 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 15:45:34 -0400 Subject: [PATCH 163/321] Mute another transforms_stats yaml test AwaitsFix https://github.com/elastic/elasticsearch/issues/42309 --- .../rest-api-spec/test/data_frame/transforms_stats.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 61117b138bff7..f552e4710c781 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -149,6 +149,9 @@ teardown: --- "Test get multiple transform stats where one does not have a task": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.put_data_frame_transform: transform_id: "airline-transform-stats-dos" From 4b0f36d361e6dc108cb9a9978f0bd1b31016baf8 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 21 May 2019 15:53:28 -0400 Subject: [PATCH 164/321] Execute actions under permit in primary mode only (#42241) Today when executing an action on a primary shard under permit, we do not enforce that the shard is in primary mode before executing the action. This commit addresses this by wrapping actions to be executed under permit in a check that the shard is in primary mode before executing the action. 
--- .../elasticsearch/ElasticsearchException.java | 7 +- .../TransportReplicationAction.java | 17 ++- .../index/seqno/RetentionLeaseActions.java | 17 +-- .../elasticsearch/index/shard/IndexShard.java | 25 +++- .../shard/ShardNotInPrimaryModeException.java | 36 +++++ .../ExceptionSerializationTests.java | 2 + .../TransportReplicationActionTests.java | 51 ++++++- .../index/shard/IndexShardTests.java | 128 ++++++++++++++---- 8 files changed, 235 insertions(+), 48 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 861228d221778..85df20d849afa 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1022,7 +1022,12 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.index.seqno.RetentionLeaseNotFoundException.class, org.elasticsearch.index.seqno.RetentionLeaseNotFoundException::new, 154, - Version.V_6_7_0); + Version.V_6_7_0), + SHARD_NOT_IN_PRIMARY_MODE_EXCEPTION( + org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, + org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, + 155, + Version.V_6_8_1); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 6edaa95033997..d19009433deb5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -63,6 +63,7 @@ import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; @@ -307,10 +308,18 @@ protected void doRun() throws Exception { primaryRequest.getTargetAllocationID(), primaryRequest.getPrimaryTerm(), actualTerm); } - acquirePrimaryOperationPermit(indexShard, primaryRequest.getRequest(), ActionListener.wrap( - releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), - this::onFailure - )); + acquirePrimaryOperationPermit( + indexShard, + primaryRequest.getRequest(), + ActionListener.wrap( + releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), + e -> { + if (e instanceof ShardNotInPrimaryModeException) { + onFailure(new ReplicationOperation.RetryOnPrimaryException(shardId, "shard is not in primary mode", e)); + } else { + onFailure(e); + } + })); } void runWithPrimaryShardReference(final PrimaryShardReference primaryShardReference) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index c69a4c6fab042..74c98bf3dca19 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -28,8 
+28,6 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -45,7 +43,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; import java.util.Objects; import java.util.function.Supplier; @@ -88,14 +85,10 @@ abstract static class TransportRetentionLeaseAction> extend @Override protected ShardsIterator shards(final ClusterState state, final InternalRequest request) { - final IndexShardRoutingTable shardRoutingTable = state + return state .routingTable() - .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()); - if (shardRoutingTable.primaryShard().active()) { - return shardRoutingTable.primaryShardIt(); - } else { - return new PlainShardIterator(request.request().getShardId(), Collections.emptyList()); - } + .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()) + .primaryShardIt(); } @Override @@ -174,6 +167,7 @@ void doRetentionLeaseAction(final IndexShard indexShard, final AddRequest reques protected Writeable.Reader<Response> getResponseReader() { return Response::new; } + } @Override @@ -400,9 +394,10 @@ public static class Response extends ActionResponse { public Response() { } - Response(StreamInput in) throws IOException { + Response(final StreamInput in) throws IOException { super(in); } + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 11e4fb81d9fbe..da5ee8f8363ff 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.shard; import com.carrotsearch.hppc.ObjectLongMap; - import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; @@ -2496,7 +2495,7 @@ public void acquirePrimaryOperationPermit(ActionListener<Releasable> onPermitAcq verifyNotClosed(); assert shardRouting.primary() : "acquirePrimaryOperationPermit should only be called on primary shard: " + shardRouting; - indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo); + indexShardOperationPermits.acquire(wrapPrimaryOperationPermitListener(onPermitAcquired), executorOnDelay, false, debugInfo); } /** @@ -2507,7 +2506,27 @@ public void acquireAllPrimaryOperationsPermits(final ActionListener<Releasable> verifyNotClosed(); assert shardRouting.primary() : "acquireAllPrimaryOperationsPermits should only be called on primary shard: " + shardRouting; - asyncBlockOperations(onPermitAcquired, timeout.duration(), timeout.timeUnit()); + asyncBlockOperations(wrapPrimaryOperationPermitListener(onPermitAcquired), timeout.duration(), timeout.timeUnit()); + } + + /** + * Wraps the action to run on a primary after acquiring the permit. This wrapping is used to check if the shard is in primary mode before + * executing the action.
+ * + * @param listener the listener to wrap + * @return the wrapped listener + */ + private ActionListener wrapPrimaryOperationPermitListener(final ActionListener listener) { + return ActionListener.delegateFailure( + listener, + (l, r) -> { + if (replicationTracker.isPrimaryMode()) { + l.onResponse(r); + } else { + r.close(); + l.onFailure(new ShardNotInPrimaryModeException(shardId, state)); + } + }); } private void asyncBlockOperations(ActionListener onPermitAcquired, long timeout, TimeUnit timeUnit) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java new file mode 100644 index 0000000000000..8bc23dcdd00f7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class ShardNotInPrimaryModeException extends IllegalIndexShardStateException { + + public ShardNotInPrimaryModeException(final ShardId shardId, final IndexShardState currentState) { + super(shardId, currentState, "shard is not in primary mode"); + } + + public ShardNotInPrimaryModeException(final StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 1fac56886de45..a0aafbb41d371 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; @@ -816,6 +817,7 @@ public void testIds() { ids.put(152, NoSuchRemoteClusterException.class); ids.put(153, RetentionLeaseAlreadyExistsException.class); ids.put(154, RetentionLeaseNotFoundException.class); + ids.put(155, ShardNotInPrimaryModeException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java 
b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 12cc9097b652c..4459aa5556988 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -64,9 +64,11 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -390,6 +392,43 @@ public void testNotStartedPrimary() { assertIndexShardCounter(0); } + public void testShardNotInPrimaryMode() { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = state(index, true, ShardRoutingState.RELOCATING); + setState(clusterService, state); + final ReplicationTask task = maybeTask(); + final Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + final AtomicBoolean executed = new AtomicBoolean(); + + final ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); + final TransportReplicationAction.ConcreteShardRequest primaryRequest + = new TransportReplicationAction.ConcreteShardRequest<>(request, primaryShard.allocationId().getId(), primaryTerm); + + isPrimaryMode.set(false); + + new TestAction(Settings.EMPTY, "internal:test-action", transportService, clusterService, shardStateAction, threadPool) { + @Override + protected void shardOperationOnPrimary(Request shardRequest, IndexShard primary, + ActionListener> listener) { + assertPhase(task, "primary"); + assertFalse(executed.getAndSet(true)); + super.shardOperationOnPrimary(shardRequest, primary, listener); + } + }.new AsyncPrimaryAction(primaryRequest, listener, task).run(); + + assertFalse(executed.get()); + assertIndexShardCounter(0); // no permit should be held + + final ExecutionException e = expectThrows(ExecutionException.class, listener::get); + assertThat(e.getCause(), instanceOf(ReplicationOperation.RetryOnPrimaryException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + assertThat(e.getCause().getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause().getCause(), hasToString(containsString("shard is not in primary mode"))); + } + /** * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from * the relocation source to the relocation target. If relocation source receives and processes this cluster state @@ -1126,6 +1165,8 @@ private void assertIndexShardCounter(int expected) { private final AtomicBoolean isRelocated = new AtomicBoolean(false); + private final AtomicBoolean isPrimaryMode = new AtomicBoolean(true); + /** * Sometimes build a ReplicationTask for tracking the phase of the * TransportReplicationAction. 
Since TransportReplicationAction has to work @@ -1271,10 +1312,16 @@ private IndexService mockIndexService(final IndexMetaData indexMetaData, Cluster private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { final IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.state()).thenReturn(IndexShardState.STARTED); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; - count.incrementAndGet(); - callback.onResponse(count::decrementAndGet); + if (isPrimaryMode.get()) { + count.incrementAndGet(); + callback.onResponse(count::decrementAndGet); + + } else { + callback.onFailure(new ShardNotInPrimaryModeException(shardId, IndexShardState.STARTED)); + } return null; }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject()); doAnswer(invocation -> { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 0be7b4433fac3..786d5bc5e8df8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -636,11 +636,13 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; + final boolean isPrimaryMode; if (randomBoolean()) { // relocation target indexShard = newShard(newShardRouting(shardId, "local_node", "other node", true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); assertEquals(0, indexShard.getActiveOperationsCount()); + isPrimaryMode = false; } else if (randomBoolean()) { // simulate promotion indexShard = newStartedShard(false); @@ -660,21 +662,60 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { if (randomBoolean()) { assertBusy(() -> assertEquals(0, indexShard.getActiveOperationsCount())); } + isPrimaryMode = true; } else { indexShard = newStartedShard(true); assertEquals(0, indexShard.getActiveOperationsCount()); + isPrimaryMode = true; } - final long primaryTerm = indexShard.getPendingPrimaryTerm(); - Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(1, indexShard.getActiveOperationsCount()); - Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(2, indexShard.getActiveOperationsCount()); + assert indexShard.getReplicationTracker().isPrimaryMode() == isPrimaryMode; + final long pendingPrimaryTerm = indexShard.getPendingPrimaryTerm(); + if (isPrimaryMode) { + Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(1, indexShard.getActiveOperationsCount()); + Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(2, indexShard.getActiveOperationsCount()); - Releasables.close(operation1, operation2); - assertEquals(0, indexShard.getActiveOperationsCount()); + Releasables.close(operation1, operation2); + assertEquals(0, indexShard.getActiveOperationsCount()); + } else { + indexShard.acquirePrimaryOperationPermit( + new ActionListener<>() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, 
hasToString(containsString("shard is not in primary mode"))); + } + }, + ThreadPool.Names.SAME, + "test"); + + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquireAllPrimaryOperationsPermits( + new ActionListener<>() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + latch.countDown(); + } + }, + TimeValue.timeValueSeconds(30)); + latch.await(); + } if (Assertions.ENABLED && indexShard.routingEntry().isRelocationTarget() == false) { - assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(primaryTerm, + assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(pendingPrimaryTerm, indexShard.getGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override public void onResponse(Releasable releasable) { @@ -1688,10 +1729,9 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { // recovery can be now finalized recoveryThread.join(); assertTrue(shard.isRelocatedPrimary()); - try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { - // lock can again be acquired - assertTrue(shard.isRelocatedPrimary()); - } + final ExecutionException e = expectThrows(ExecutionException.class, () -> acquirePrimaryOperationPermitBlockingly(shard)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); closeShards(shard); } @@ -1699,30 +1739,64 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + final CountDownLatch startRecovery = new CountDownLatch(1); + final CountDownLatch relocationStarted = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { try { - shard.relocated(primaryContext -> {}); + startRecovery.await(); + shard.relocated(primaryContext -> relocationStarted.countDown()); } catch (InterruptedException e) { throw new RuntimeException(e); } }); recoveryThread.start(); - List> onLockAcquiredActions = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - PlainActionFuture onLockAcquired = new PlainActionFuture() { - @Override - public void onResponse(Releasable releasable) { - releasable.close(); - super.onResponse(releasable); - } - }; - shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); - onLockAcquiredActions.add(onLockAcquired); - } - for (PlainActionFuture onLockAcquired : onLockAcquiredActions) { - assertNotNull(onLockAcquired.get(30, TimeUnit.SECONDS)); + final int numberOfAcquisitions = randomIntBetween(1, 10); + final int recoveryIndex = randomIntBetween(1, numberOfAcquisitions); + + for (int i = 0; i < numberOfAcquisitions; i++) { + + final PlainActionFuture onLockAcquired; + final Runnable assertion; + if (i < recoveryIndex) { + final AtomicBoolean invoked = new AtomicBoolean(); + onLockAcquired = new PlainActionFuture<>() { + + @Override + public void onResponse(Releasable releasable) { + invoked.set(true); + releasable.close(); + super.onResponse(releasable); + } + 
+ @Override + public void onFailure(Exception e) { + throw new AssertionError(); + } + + }; + assertion = () -> assertTrue(invoked.get()); + } else if (recoveryIndex == i) { + startRecovery.countDown(); + relocationStarted.await(); + onLockAcquired = new PlainActionFuture<>(); + assertion = () -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }; + } else { + onLockAcquired = new PlainActionFuture<>(); + assertion = () -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }; + } + + shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); + assertion.run(); } recoveryThread.join(); From e8b85c90e941fe7003f7446c4364523663d681bc Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 17:06:51 -0400 Subject: [PATCH 165/321] Mute testDelayedOperationsBeforeAndAfterRelocated Tracked at #42325 --- .../test/java/org/elasticsearch/index/shard/IndexShardTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 786d5bc5e8df8..64b0c0db1dc8c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1736,6 +1736,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { closeShards(shard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42325") public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); From 2d8869175b102565f73ca14ae4d2d6e3fb66660b Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 21 May 2019 14:27:57 -0700 Subject: [PATCH 166/321] remove backcompat handling of 6.2.x versions (#42044) relates to refactoring initiative #41164. 
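As a schematic illustration of the pattern being removed (shapes assumed; it mirrors the UpdateProcessAction change in the diff below), the cleanup turns version-gated stream reads into unconditional ones, because no supported peer can be on a pre-6.2 wire version anymore:

    // Before: reads gated on the 6.2.0 wire version to tolerate older peers.
    if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
        filter = in.readOptionalWriteable(MlFilter::new);
        updateScheduledEvents = in.readBoolean();
    }

    // After: every supported peer sends these fields, so the reads become
    // unconditional and the V_6_2_x version constants can be deleted.
    filter = in.readOptionalWriteable(MlFilter::new);
    updateScheduledEvents = in.readBoolean();
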
--- .../elasticsearch/ElasticsearchException.java | 2 +- .../main/java/org/elasticsearch/Version.java | 20 ------- .../org/elasticsearch/index/store/Store.java | 19 +----- .../indices/flush/SyncedFlushService.java | 14 +---- .../search/slice/SliceBuilder.java | 10 +--- .../java/org/elasticsearch/BuildTests.java | 5 -- .../ExceptionSerializationTests.java | 2 +- .../common/lucene/uid/VersionsTests.java | 14 +---- .../index/analysis/PreBuiltAnalyzerTests.java | 4 +- .../search/slice/SliceBuilderTests.java | 26 ++------ .../xpack/core/ml/action/UpdateJobAction.java | 10 +--- .../core/ml/action/UpdateProcessAction.java | 13 ++-- .../core/ml/datafeed/DatafeedConfig.java | 10 +--- .../core/ml/job/config/AnalysisConfig.java | 11 +--- .../xpack/core/ml/job/results/Bucket.java | 12 +--- .../action/token/CreateTokenRequest.java | 60 ++++++------------- .../action/token/CreateTokenResponse.java | 8 --- .../action/TransportOpenJobActionTests.java | 10 ++-- .../monitoring/MonitoringFeatureSetTests.java | 3 +- .../xpack/restart/FullClusterRestartIT.java | 8 +-- 20 files changed, 58 insertions(+), 203 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 85df20d849afa..a2e53a1189f1b 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1006,7 +1006,7 @@ private enum ElasticsearchExceptionHandle { UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, - MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_6_2_0), + MultiBucketConsumerService.TooManyBucketsException::new, 149, UNKNOWN_VERSION_ADDED), COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0a6b19444efa7..90b7ae869e811 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -48,16 +48,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); // The below version is missing from the 7.3 JAR private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); - public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1); - public static final int V_6_2_1_ID = 6020199; - public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1); - public static final int V_6_2_2_ID = 6020299; - public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1); - public static final int V_6_2_3_ID = 6020399; - public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); - public static final int V_6_2_4_ID = 6020499; - public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); 
public static final int V_6_3_0_ID = 6030099; public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_3_1_ID = 6030199; @@ -176,16 +166,6 @@ public static Version fromId(int id) { return V_6_3_1; case V_6_3_0_ID: return V_6_3_0; - case V_6_2_4_ID: - return V_6_2_4; - case V_6_2_3_ID: - return V_6_2_3; - case V_6_2_2_ID: - return V_6_2_2; - case V_6_2_1_ID: - return V_6_2_1; - case V_6_2_0_ID: - return V_6_2_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 65d2f8d7812f8..5f1f7d23a8c6a 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1550,23 +1550,8 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long final IndexCommit lastIndexCommitCommit = existingCommits.get(existingCommits.size() - 1); final String translogUUID = lastIndexCommitCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY); final IndexCommit startingIndexCommit; - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog - // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog are fully retained. - if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) { - final List recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); - } - } - assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } + // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. 
+ startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) { throw new IllegalStateException("starting commit translog uuid [" diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 921a8f9cc7c47..6291531b7f907 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -594,10 +594,6 @@ static final class PreSyncedFlushResponse extends TransportResponse { this.existingSyncId = existingSyncId; } - boolean includeNumDocs(Version version) { - return version.onOrAfter(Version.V_6_2_2); - } - boolean includeExistingSyncId(Version version) { return version.onOrAfter(Version.V_6_3_0); } @@ -606,11 +602,7 @@ boolean includeExistingSyncId(Version version) { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); commitId = new Engine.CommitId(in); - if (includeNumDocs(in.getVersion())) { - numDocs = in.readInt(); - } else { - numDocs = UNKNOWN_NUM_DOCS; - } + numDocs = in.readInt(); if (includeExistingSyncId(in.getVersion())) { existingSyncId = in.readOptionalString(); } @@ -620,9 +612,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); commitId.writeTo(out); - if (includeNumDocs(out.getVersion())) { - out.writeInt(numDocs); - } + out.writeInt(numDocs); if (includeExistingSyncId(out.getVersion())) { out.writeOptionalString(existingSyncId); } diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 40e10eb589006..08f042aa69650 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -106,10 +106,6 @@ public SliceBuilder(String field, int id, int max) { public SliceBuilder(StreamInput in) throws IOException { String field = in.readString(); - if ("_uid".equals(field) && in.getVersion().before(Version.V_6_3_0)) { - // This is safe because _id and _uid are handled the same way in #toFilter - field = IdFieldMapper.NAME; - } this.field = field; this.id = in.readVInt(); this.max = in.readVInt(); @@ -117,11 +113,7 @@ public SliceBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (IdFieldMapper.NAME.equals(field) && out.getVersion().before(Version.V_6_3_0)) { - out.writeString("_uid"); - } else { - out.writeString(field); - } + out.writeString(field); out.writeVInt(id); out.writeVInt(max); } diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index e0d8140c708d6..59e289b9e98ef 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -199,29 +199,24 @@ public void testSerializationBWC() throws IOException { randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))); final List versions = Version.getDeclaredVersions(Version.class); - final Version pre63Version = randomFrom(versions.stream().filter(v -> v.before(Version.V_6_3_0)).collect(Collectors.toList())); final 
Version post63Pre67Version = randomFrom(versions.stream() .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList())); final Version post67Pre70Version = randomFrom(versions.stream() .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList())); final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList())); - final WriteableBuild pre63 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, pre63Version); final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version); final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version); final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); - assertThat(pre63.build.flavor(), equalTo(Build.Flavor.OSS)); assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor())); assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor())); assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(pre63.build.type(), equalTo(Build.Type.UNKNOWN)); assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR)); assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type())); assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - assertThat(pre63.build.getQualifiedVersion(), equalTo(pre63Version.toString())); assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString())); assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index a0aafbb41d371..5b33068013965 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -366,7 +366,7 @@ public void testCircuitBreakingException() throws IOException { } public void testTooManyBucketsException() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_2_0, Version.CURRENT); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); MultiBucketConsumerService.TooManyBucketsException ex = serialize(new MultiBucketConsumerService.TooManyBucketsException("Too many buckets", 100), version); assertEquals("Too many buckets", ex.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index febe2b976fb47..94945dc92c952 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion; @@ -189,23 +188,16 @@ public void testCacheFilterReader() throws Exception { } public void testLuceneVersionOnUnknownVersions() { - List allVersions = VersionUtils.allVersions(); - - // should have 
the same Lucene version as the latest 6.x version - Version version = Version.fromString("6.88.50"); - assertEquals(allVersions.get(Collections.binarySearch(allVersions, Version.V_7_0_0) - 1).luceneVersion, - version.luceneVersion); - // between two known versions, should use the lucene version of the previous version - version = Version.fromString("6.2.50"); - assertEquals(VersionUtils.getPreviousVersion(Version.V_6_2_4).luceneVersion, version.luceneVersion); + Version version = VersionUtils.getPreviousVersion(Version.CURRENT); + assertEquals(Version.fromId(version.id + 100).luceneVersion, version.luceneVersion); // too old version, major should be the oldest supported lucene version minus 1 version = Version.fromString("5.2.1"); assertEquals(VersionUtils.getFirstVersion().luceneVersion.major - 1, version.luceneVersion.major); // future version, should be the same version as today - version = Version.fromString("8.77.1"); + version = Version.fromId(Version.CURRENT.id + 100); assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion); } } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 65958ec9319c2..3ca1bec5a4b57 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -75,8 +75,8 @@ public void testThatInstancesAreCachedAndReused() { PreBuiltAnalyzers.STANDARD.getAnalyzer(VersionUtils.randomPreviousCompatibleVersion(random(), Version.CURRENT))); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_1), - PreBuiltAnalyzers.STOP.getAnalyzer(Version.V_6_2_2)); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.0")), + PreBuiltAnalyzers.STOP.getAnalyzer(Version.fromString("5.0.1"))); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index bf053d34bff56..fffa501cc4be4 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -53,7 +53,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexNumericFieldData; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; @@ -63,6 +62,7 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -455,21 +455,6 @@ public void testToFilterDeprecationMessage() throws IOException { } } - public void testSerializationBackcompat() throws IOException { - SliceBuilder sliceBuilder = new SliceBuilder(1, 5); - assertEquals(IdFieldMapper.NAME, sliceBuilder.getField()); - - SliceBuilder copy62 = copyWriteable(sliceBuilder, - new NamedWriteableRegistry(Collections.emptyList()), - SliceBuilder::new, Version.V_6_2_0); - assertEquals(sliceBuilder, copy62); - - SliceBuilder copy63 = copyWriteable(copy62, - 
new NamedWriteableRegistry(Collections.emptyList()), - SliceBuilder::new, Version.V_6_3_0); - assertEquals(sliceBuilder, copy63); - } - public void testToFilterWithRouting() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { @@ -489,15 +474,14 @@ public void testToFilterWithRouting() throws IOException { when(clusterService.operationRouting()).thenReturn(routing); when(clusterService.getSettings()).thenReturn(Settings.EMPTY); try (IndexReader reader = DirectoryReader.open(dir)) { - QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + QueryShardContext context = createShardContext(version, reader, "field", DocValuesType.SORTED, 5, 0); SliceBuilder builder = new SliceBuilder("field", 6, 10); String[] routings = new String[] { "foo" }; - Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT); + Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, version); assertEquals(new DocValuesSliceQuery("field", 6, 10), query); - query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, version); assertEquals(new DocValuesSliceQuery("field", 6, 10), query); - query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0); - assertEquals(new DocValuesSliceQuery("field", 1, 2), query); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 85e1615c0dfe0..6ecee409c30f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -92,11 +92,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); jobId = in.readString(); update = new JobUpdate(in); - if (in.getVersion().onOrAfter(Version.V_6_2_2)) { - isInternal = in.readBoolean(); - } else { - isInternal = false; - } + isInternal = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.getVersion().before(Version.V_7_0_0)) { in.readBoolean(); // was waitForAck } @@ -107,9 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); update.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_2_2)) { - out.writeBoolean(isInternal); - } + out.writeBoolean(isInternal); if (out.getVersion().onOrAfter(Version.V_6_3_0) && out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(false); // was waitForAck } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java index 5091ff1f968f1..6a8e1703ad1f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import 
org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -121,10 +120,8 @@ public Request(StreamInput in) throws IOException { if (in.readBoolean()) { detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new); } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - filter = in.readOptionalWriteable(MlFilter::new); - updateScheduledEvents = in.readBoolean(); - } + filter = in.readOptionalWriteable(MlFilter::new); + updateScheduledEvents = in.readBoolean(); } @Override @@ -136,10 +133,8 @@ public void writeTo(StreamOutput out) throws IOException { if (hasDetectorUpdates) { out.writeList(detectorUpdates); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalWriteable(filter); - out.writeBoolean(updateScheduledEvents); - } + out.writeOptionalWriteable(filter); + out.writeBoolean(updateScheduledEvents); } public Request(String jobId, ModelPlotConfig modelPlotConfig, List detectorUpdates, MlFilter filter, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 810d97df34636..f08c4a9d7391d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -222,11 +222,7 @@ public DatafeedConfig(StreamInput in) throws IOException { } this.scrollSize = in.readOptionalVInt(); this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); - } else { - this.headers = Collections.emptyMap(); - } + this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); } else { @@ -432,9 +428,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); - } + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalWriteable(delayedDataCheckConfig); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 933188c8221eb..9e01cd21e2b90 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -126,11 +125,7 @@ public AnalysisConfig(StreamInput in) throws IOException { bucketSpan = in.readTimeValue(); categorizationFieldName = in.readOptionalString(); categorizationFilters = in.readBoolean() ? 
Collections.unmodifiableList(in.readStringList()) : null; - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); - } else { - categorizationAnalyzerConfig = null; - } + categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); latency = in.readOptionalTimeValue(); summaryCountFieldName = in.readOptionalString(); detectors = Collections.unmodifiableList(in.readList(Detector::new)); @@ -149,9 +144,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalWriteable(categorizationAnalyzerConfig); - } + out.writeOptionalWriteable(categorizationAnalyzerConfig); out.writeOptionalTimeValue(latency); out.writeOptionalString(summaryCountFieldName); out.writeList(detectors); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index d335ba39e0026..8e04e001ed6cd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -138,12 +138,8 @@ public Bucket(StreamInput in) throws IOException { if (in.getVersion().before(Version.V_6_5_0)) { in.readList(Bucket::readOldPerPartitionNormalization); } - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - scheduledEvents = in.readStringList(); - if (scheduledEvents.isEmpty()) { - scheduledEvents = Collections.emptyList(); - } - } else { + scheduledEvents = in.readStringList(); + if (scheduledEvents.isEmpty()) { scheduledEvents = Collections.emptyList(); } } @@ -164,9 +160,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().before(Version.V_6_5_0)) { out.writeList(Collections.emptyList()); } - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeStringCollection(scheduledEvents); - } + out.writeStringCollection(scheduledEvents); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index ed31f0cc020c6..3fdfaab060542 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -192,32 +192,18 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(grantType); - if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - out.writeOptionalString(username); - if (password == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } - out.writeOptionalString(refreshToken); + out.writeOptionalString(username); + if (password == null) { + out.writeOptionalBytesReference(null); } else { - if ("refresh_token".equals(grantType)) { - throw new IllegalArgumentException("a refresh request cannot be sent to an older version"); - } else { - out.writeString(username); - final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); - 
try { - out.writeByteArray(passwordBytes); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } + out.writeOptionalString(refreshToken); out.writeOptionalString(scope); } @@ -225,29 +211,19 @@ public void writeTo(StreamOutput out) throws IOException { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); grantType = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - username = in.readOptionalString(); - BytesReference bytesRef = in.readOptionalBytesReference(); - if (bytesRef != null) { - byte[] bytes = BytesReference.toBytes(bytesRef); - try { - password = new SecureString(CharArrays.utf8BytesToChars(bytes)); - } finally { - Arrays.fill(bytes, (byte) 0); - } - } else { - password = null; - } - refreshToken = in.readOptionalString(); - } else { - username = in.readString(); - final byte[] passwordBytes = in.readByteArray(); + username = in.readOptionalString(); + BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); try { - password = new SecureString(CharArrays.utf8BytesToChars(passwordBytes)); + password = new SecureString(CharArrays.utf8BytesToChars(bytes)); } finally { - Arrays.fill(passwordBytes, (byte) 0); + Arrays.fill(bytes, (byte) 0); } + } else { + password = null; } + refreshToken = in.readOptionalString(); scope = in.readOptionalString(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 30111a92431dc..93ddc56459677 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -61,12 +61,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scope); if (out.getVersion().onOrAfter(Version.V_6_5_0)) { out.writeOptionalString(refreshToken); - } else if (out.getVersion().onOrAfter(Version.V_6_2_0)) { - if (refreshToken == null) { - out.writeString(""); - } else { - out.writeString(refreshToken); - } } } @@ -78,8 +72,6 @@ public void readFrom(StreamInput in) throws IOException { scope = in.readOptionalString(); if (in.getVersion().onOrAfter(Version.V_6_5_0)) { refreshToken = in.readOptionalString(); - } else if (in.getVersion().onOrAfter(Version.V_6_2_0)) { - refreshToken = in.readString(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 92d7bbcc49e54..1065503e091d4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -406,11 +406,13 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); + Version version = 
VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_6_4_0)); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), version)) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_3_0)) + nodeAttr, Collections.emptySet(), version)) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -437,9 +439,9 @@ public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion( nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), Version.V_6_2_0)) + nodeAttr, Collections.emptySet(), Version.fromString("6.2.0"))) .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), Version.V_6_4_0)) + nodeAttr, Collections.emptySet(), Version.fromString("6.4.0"))) .build(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java index 1a06a9a4037f9..d644a63e7bcaa 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ObjectPath; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; @@ -63,7 +64,7 @@ public void testEnabledDefault() { public void testUsage() throws Exception { // anything prior to 6.3 does not include collection_enabled (so defaults it to null) - final Version serializedVersion = randomFrom(Version.CURRENT, Version.V_6_3_0, Version.V_6_2_2); + final Version serializedVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); final boolean collectionEnabled = randomBoolean(); int localCount = randomIntBetween(0, 5); List exporterList = new ArrayList<>(); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index f73496db0f875..f17aab309ba72 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -266,9 +266,7 @@ public void testRollupAfterRestart() throws Exception { final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); 
clusterHealthRequest.addParameter("wait_for_status", "yellow"); clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); - if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { - clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); - } + clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); Map clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest)); assertThat(clusterHealthResponse.get("timed_out"), equalTo(Boolean.FALSE)); @@ -384,9 +382,7 @@ private void waitForYellow(String indexName) throws IOException { request.addParameter("wait_for_status", "yellow"); request.addParameter("timeout", "30s"); request.addParameter("wait_for_no_relocating_shards", "true"); - if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { - request.addParameter("wait_for_no_initializing_shards", "true"); - } + request.addParameter("wait_for_no_initializing_shards", "true"); Map response = entityAsMap(client().performRequest(request)); assertThat(response.get("timed_out"), equalTo(Boolean.FALSE)); } From a3bd569a0f4b4014ba4992a8dc0d390cc1a17ff9 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 00:16:53 +0200 Subject: [PATCH 167/321] Cleanup Redundant BlobStoreFormat Class (#42195) * No need to have an abstract class here when there's only a single impl. --- .../blobstore/BlobStoreFormat.java | 111 ------------------ .../blobstore/ChecksumBlobStoreFormat.java | 92 +++++++++++---- 2 files changed, 71 insertions(+), 132 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java deleted file mode 100644 index 044caee41c55d..0000000000000 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.repositories.blobstore; - -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.snapshots.SnapshotInfo; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Base class that handles serialization of various data structures during snapshot/restore operations. - */ -public abstract class BlobStoreFormat { - - protected final String blobNameFormat; - - protected final CheckedFunction reader; - - protected final NamedXContentRegistry namedXContentRegistry; - - // Serialization parameters to specify correct context for metadata serialization - protected static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; - - static { - Map snapshotOnlyParams = new HashMap<>(); - // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot - // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT - snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); - // serialize SnapshotInfo using the SNAPSHOT mode - snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); - SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); - } - - /** - * @param blobNameFormat format of the blobname in {@link String#format(Locale, String, Object...)} format - * @param reader the prototype object that can deserialize objects with type T - */ - protected BlobStoreFormat(String blobNameFormat, CheckedFunction reader, - NamedXContentRegistry namedXContentRegistry) { - this.reader = reader; - this.blobNameFormat = blobNameFormat; - this.namedXContentRegistry = namedXContentRegistry; - } - - /** - * Reads and parses the blob with given blob name. 
- * - * @param blobContainer blob container - * @param blobName blob name - * @return parsed blob object - */ - public abstract T readBlob(BlobContainer blobContainer, String blobName) throws IOException; - - /** - * Reads and parses the blob with given name, applying name translation using the {link #blobName} method - * - * @param blobContainer blob container - * @param name name to be translated into - * @return parsed blob object - */ - public T read(BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - return readBlob(blobContainer, blobName); - } - - /** - * Deletes obj in the blob container - */ - public void delete(BlobContainer blobContainer, String name) throws IOException { - blobContainer.deleteBlob(blobName(name)); - } - - public String blobName(String name) { - return String.format(Locale.ROOT, blobNameFormat, name); - } - - protected T read(BytesReference bytes) throws IOException { - try (XContentParser parser = XContentHelper - .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes)) { - return reader.apply(parser); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index d4d009b8ad63e..d216fe3234e83 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.blobstore.BlobContainer; @@ -33,24 +34,43 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.gateway.CorruptStateException; +import org.elasticsearch.snapshots.SnapshotInfo; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; /** * Snapshot metadata file format used in v2.0 and above */ -public class ChecksumBlobStoreFormat extends BlobStoreFormat { +public final class ChecksumBlobStoreFormat { + + // Serialization parameters to specify correct context for metadata serialization + private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + + static { + Map snapshotOnlyParams = new HashMap<>(); + // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot + // exclusion of these elements is done by setting 
MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT + snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); + // serialize SnapshotInfo using the SNAPSHOT mode + snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); + SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); + } private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; @@ -59,12 +79,18 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm private static final int BUFFER_SIZE = 4096; - protected final XContentType xContentType; + private final XContentType xContentType; - protected final boolean compress; + private final boolean compress; private final String codec; + private final String blobNameFormat; + + private final CheckedFunction reader; + + private final NamedXContentRegistry namedXContentRegistry; + /** * @param codec codec name * @param blobNameFormat format of the blobname in {@link String#format} format @@ -74,7 +100,9 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm */ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction reader, NamedXContentRegistry namedXContentRegistry, boolean compress, XContentType xContentType) { - super(blobNameFormat, reader, namedXContentRegistry); + this.reader = reader; + this.blobNameFormat = blobNameFormat; + this.namedXContentRegistry = namedXContentRegistry; this.xContentType = xContentType; this.compress = compress; this.codec = codec; @@ -91,6 +119,29 @@ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunct this(codec, blobNameFormat, reader, namedXContentRegistry, compress, DEFAULT_X_CONTENT_TYPE); } + /** + * Reads and parses the blob with given name, applying name translation using the {link #blobName} method + * + * @param blobContainer blob container + * @param name name to be translated into + * @return parsed blob object + */ + public T read(BlobContainer blobContainer, String name) throws IOException { + String blobName = blobName(name); + return readBlob(blobContainer, blobName); + } + + /** + * Deletes obj in the blob container + */ + public void delete(BlobContainer blobContainer, String name) throws IOException { + blobContainer.deleteBlob(blobName(name)); + } + + public String blobName(String name) { + return String.format(Locale.ROOT, blobNameFormat, name); + } + /** * Reads blob with specified name without resolving the blobName using using {@link #blobName} method. 
* @@ -108,8 +159,10 @@ public T readBlob(BlobContainer blobContainer, String blobName) throws IOExcepti CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION); long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; - BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize); - return read(bytesReference); + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, + new BytesArray(bytes, (int) filePointer, (int) contentSize))) { + return reader.apply(parser); + } } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we trick this into a dedicated exception with the original stacktrace throw new CorruptStateException(ex); @@ -156,7 +209,17 @@ public void write(T obj, BlobContainer blobContainer, String name) throws IOExce } private void writeTo(final T obj, final String blobName, final CheckedConsumer consumer) throws IOException { - final BytesReference bytes = write(obj); + final BytesReference bytes; + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + if (compress) { + try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { + write(obj, compressedStreamOutput); + } + } else { + write(obj, bytesStreamOutput); + } + bytes = bytesStreamOutput.bytes(); + } try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) { @@ -176,20 +239,7 @@ public void close() { } } - protected BytesReference write(T obj) throws IOException { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - if (compress) { - try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { - write(obj, compressedStreamOutput); - } - } else { - write(obj, bytesStreamOutput); - } - return bytesStreamOutput.bytes(); - } - } - - protected void write(T obj, StreamOutput streamOutput) throws IOException { + private void write(T obj, StreamOutput streamOutput) throws IOException { try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput)) { builder.startObject(); obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); From bb2ec18f672d850c043633f154cc196196995b29 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 21 May 2019 19:13:39 -0400 Subject: [PATCH 168/321] Fix off-by-one error in an index shard test There is an off-by-one error in this test. It leads to the recovery thread never being started, and that means joining on it will wait indefinitely. This commit addresses that by fixing the off-by-one error. 
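To see the failure mode concretely, here is a minimal sketch of the bug shape (a hedged illustration assuming an ESTestCase subclass for randomIntBetween() and an import of java.util.concurrent.CountDownLatch; the names are simplified stand-ins, not the test's actual fields). The trigger index was drawn from [1, numberOfAcquisitions] while the loop counter only covers [0, numberOfAcquisitions - 1], so the draw recoveryIndex == numberOfAcquisitions leaves the latch closed, the recovery thread never proceeds, and the final join() blocks indefinitely:

    public void testSketchOfTheHang() throws Exception {
        final CountDownLatch startRecovery = new CountDownLatch(1);
        final Thread recoveryThread = new Thread(() -> {
            try {
                startRecovery.await(); // recovery work begins only once the latch opens
            } catch (InterruptedException e) {
                throw new AssertionError(e);
            }
        });
        recoveryThread.start();

        final int numberOfAcquisitions = randomIntBetween(1, 10);
        final int recoveryIndex = randomIntBetween(1, numberOfAcquisitions); // bug: may draw the upper bound
        for (int i = 0; i < numberOfAcquisitions; i++) { // i never reaches numberOfAcquisitions
            if (recoveryIndex == i) {
                startRecovery.countDown(); // skipped entirely when recoveryIndex == numberOfAcquisitions
            }
        }
        recoveryThread.join(); // waits forever whenever recoveryIndex == numberOfAcquisitions
    }

Drawing the index with randomIntBetween(0, numberOfAcquisitions - 1), as the hunk below does, guarantees the countDown branch runs on some iteration.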
Closes #42325 --- .../index/shard/IndexShardTests.java | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 64b0c0db1dc8c..64886af18332a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1736,7 +1736,6 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { closeShards(shard); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42325") public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); @@ -1754,12 +1753,11 @@ public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { recoveryThread.start(); final int numberOfAcquisitions = randomIntBetween(1, 10); - final int recoveryIndex = randomIntBetween(1, numberOfAcquisitions); + final List assertions = new ArrayList<>(numberOfAcquisitions); + final int recoveryIndex = randomIntBetween(0, numberOfAcquisitions - 1); for (int i = 0; i < numberOfAcquisitions; i++) { - final PlainActionFuture onLockAcquired; - final Runnable assertion; if (i < recoveryIndex) { final AtomicBoolean invoked = new AtomicBoolean(); onLockAcquired = new PlainActionFuture<>() { @@ -1777,26 +1775,29 @@ public void onFailure(Exception e) { } }; - assertion = () -> assertTrue(invoked.get()); + assertions.add(() -> assertTrue(invoked.get())); } else if (recoveryIndex == i) { startRecovery.countDown(); relocationStarted.await(); onLockAcquired = new PlainActionFuture<>(); - assertion = () -> { + assertions.add(() -> { final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); - }; + }); } else { onLockAcquired = new PlainActionFuture<>(); - assertion = () -> { + assertions.add(() -> { final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); - }; + }); } shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); + } + + for (final Runnable assertion : assertions) { assertion.run(); } From c1aef4bd558a84bcab9f7cf485b72d6a5e5aa601 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 19:45:52 -0400 Subject: [PATCH 169/321] Estimate num history ops should always use translog (#42211) Currently, we ignore soft-deletes in peer recovery, thus estimateNumberOfHistoryOperations should always use translog. 
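The reason the translog must be the source of truth is that phase 2 of peer recovery replays translog operations, so an estimate taken from the soft-deletes history could disagree with the number of operations actually sent. A schematic sketch of the sender-side invariant (shapes simplified; getHistoryOperations is assumed here to mirror the engine's readHistoryOperations API and is not quoted from this change):

    private static void phase2Sketch(IndexShard shard, long startingSeqNo) throws IOException {
        // the reported total must come from the same history source the replay snapshot reads
        final int totalOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo);
        try (Translog.Snapshot snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo)) {
            assert snapshot.totalOperations() == totalOps; // both now derive from the translog
            // ... ship each operation in the snapshot to the recovery target ...
        }
    }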
Relates #38904 --- .../elasticsearch/index/engine/InternalEngine.java | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index a85c4f981d1b3..24d1078510c0b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -513,15 +513,8 @@ public Translog.Snapshot readHistoryOperations(String source, MapperService mapp * Returns the estimated number of history operations whose seq# at least the provided seq# in this engine. */ @Override - public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { - if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), - Long.MAX_VALUE, false)) { - return snapshot.totalOperations(); - } - } else { - return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); - } + public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) { + return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); } @Override From 57859413eaf1f59357eb6a9875ca0ae51a76bbb3 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 19:54:46 -0400 Subject: [PATCH 170/321] Skip global checkpoint sync for closed indices (#41874) The verifying-before-close step ensures the global checkpoints on all shard copies are in sync; thus, we don't need to sync global checkpoints for closed indices. Relates #33888 --- .../elasticsearch/index/shard/IndexShard.java | 4 +-- .../index/shard/IndexShardTests.java | 27 +++++++++++++++++++ .../indices/state/CloseIndexIT.java | 27 +++++++++++++++++++ 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index da5ee8f8363ff..fdd95614756b7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2136,8 +2136,8 @@ public void maybeSyncGlobalCheckpoint(final String reason) { StreamSupport .stream(globalCheckpoints.values().spliterator(), false) .anyMatch(v -> v.value < globalCheckpoint); - // only sync if there is a shard lagging the primary - if (syncNeeded) { + // only sync if index is not closed and there is a shard lagging the primary + if (syncNeeded && indexSettings.getIndexMetaData().getState() == IndexMetaData.State.OPEN) { logger.trace("syncing global checkpoint for [{}]", reason); globalCheckpointSyncer.run(); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 64886af18332a..04ef68852cc3f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1140,6 +1140,33 @@ public void testGlobalCheckpointSync() throws IOException { closeShards(replicaShard, primaryShard); } + public void testClosedIndicesSkipSyncGlobalCheckpoint() throws Exception { + ShardId shardId = new ShardId("index", "_na_", 0); + IndexMetaData.Builder indexMetadata = IndexMetaData.builder("index") +
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)) + .state(IndexMetaData.State.CLOSE).primaryTerm(0, 1); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(8), true, + ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); + AtomicBoolean synced = new AtomicBoolean(); + IndexShard primaryShard = newShard(shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), + () -> synced.set(true), RetentionLeaseSyncer.EMPTY); + recoverShardFromStore(primaryShard); + IndexShard replicaShard = newShard(shardId, false); + recoverReplica(replicaShard, primaryShard, true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(primaryShard, "_doc", Integer.toString(i)); + } + assertThat(primaryShard.getLocalCheckpoint(), equalTo(numDocs - 1L)); + primaryShard.updateLocalCheckpointForShard(replicaShard.shardRouting.allocationId().getId(), primaryShard.getLocalCheckpoint()); + long globalCheckpointOnReplica = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, primaryShard.getLocalCheckpoint()); + primaryShard.updateGlobalCheckpointForShard(replicaShard.shardRouting.allocationId().getId(), globalCheckpointOnReplica); + primaryShard.maybeSyncGlobalCheckpoint("test"); + assertFalse("closed indices should skip global checkpoint sync", synced.get()); + closeShards(primaryShard, replicaShard); + } + public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 740034f12ecc5..6f666483b18d0 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -36,7 +36,9 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; @@ -421,6 +423,31 @@ public Settings onNodeStopped(String nodeName) throws Exception { } } + public void testResyncPropagatePrimaryTerm() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + final String indexName = "closed_indices_promotion"; + createIndex(indexName, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) + .build()); + indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) + .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())); + ensureGreen(indexName); + assertAcked(client().admin().indices().prepareClose(indexName)); + assertIndexIsClosed(indexName); + ensureGreen(indexName); + String nodeWithPrimary = clusterService().state().nodes().get(clusterService().state() + .routingTable().index(indexName).shard(0).primaryShard().currentNodeId()).getName(); + 
internalCluster().restartNode(nodeWithPrimary, new InternalTestCluster.RestartCallback()); + ensureGreen(indexName); + long primaryTerm = clusterService().state().metaData().index(indexName).primaryTerm(0); + for (String nodeName : internalCluster().nodesInclude(indexName)) { + IndexShard shard = internalCluster().getInstance(IndicesService.class, nodeName) + .indexService(resolveIndex(indexName)).getShard(0); + assertThat(shard.routingEntry().toString(), shard.getOperationPrimaryTerm(), equalTo(primaryTerm)); + } + } + static void assertIndexIsClosed(final String... indices) { final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); for (String index : indices) { From 75be2a669e1e7e38cfc0e7b55bf99c792fb8925f Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 22:35:51 -0400 Subject: [PATCH 171/321] Peer recovery should flush at the end (#41660) Flushing at the end of a peer recovery (if needed) brings two benefits: 1. Closing an index won't end up in the red state, because a recovering replica will always be ready for closing whether or not it performs the verifying-before-close step. 2. It is a good opportunity to compact the store (i.e., flush and merge Lucene, and trim the translog). Closes #40024 Closes #39588 --- .../indices/recovery/RecoveryTarget.java | 10 +++++ .../indices/recovery/IndexRecoveryIT.java | 42 +++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1f2c9a0f578cc..b3c6d12ab96e3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -27,6 +27,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -38,6 +39,7 @@ import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeases; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotRecoveringException; import org.elasticsearch.index.shard.IndexShardState; @@ -298,11 +300,19 @@ public void finalizeRecovery(final long globalCheckpoint, ActionListener l // Persist the global checkpoint.
indexShard.sync(); indexShard.persistRetentionLeases(); + if (hasUncommittedOperations()) { + indexShard.flush(new FlushRequest().force(true).waitIfOngoing(true)); + } indexShard.finalizeRecovery(); return null; }); } + private boolean hasUncommittedOperations() throws IOException { + long localCheckpointOfCommit = Long.parseLong(indexShard.commitStats().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + return indexShard.estimateNumberOfHistoryOperations("peer-recovery", localCheckpointOfCommit + 1) > 0; + } + @Override public void handoffPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) { indexShard.activateWithPrimaryContext(primaryContext); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4196472334ca9..3130cebad7097 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -52,9 +53,12 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.recovery.RecoveryStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.indices.flush.SyncedFlushUtil; import org.elasticsearch.indices.recovery.RecoveryState.Stage; import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.AnalysisPlugin; @@ -84,14 +88,19 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -910,6 +919,39 @@ public void testDoNotInfinitelyWaitForMapping() { assertHitCount(client().prepareSearch().get(), numDocs); } + public void testRecoveryFlushReplica() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + String indexName = "test-index"; + createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build()); + int numDocs = randomIntBetween(0, 10); + indexRandom(randomBoolean(), false, 
randomBoolean(), IntStream.range(0, numDocs) + .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put("index.number_of_replicas", 1))); + ensureGreen(indexName); + ShardId shardId = null; + for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) { + shardId = shardStats.getShardRouting().shardId(); + if (shardStats.getShardRouting().primary() == false) { + assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs)); + SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( + shardStats.getCommitStats().getUserData().entrySet()); + assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint())); + assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo())); + } + } + SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); + assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0))); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put("index.number_of_replicas", 2))); + ensureGreen(indexName); + // Recovery should keep syncId if no indexing activity on the primary after synced-flush. + Set syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) + .map(shardStats -> shardStats.getCommitStats().syncId()) + .collect(Collectors.toSet()); + assertThat(syncIds, hasSize(1)); + } + public static final class TestAnalysisPlugin extends Plugin implements AnalysisPlugin { final AtomicBoolean throwParsingError = new AtomicBoolean(); @Override From e5722145a629bd2afb8499f76ac5a5b2c9136ac2 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 21 May 2019 20:35:39 -0700 Subject: [PATCH 172/321] Remove the 6.7 version constants. (#42039) This PR removes all constants of the form `Version.V_6_7_*`, since master no longer needs to account for them. Relates to #41164. 
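Most of the mechanical cleanup in a change like this follows one pattern: once no wire-compatible peer can be older than the gate, a version-conditional stream read or write collapses to an unconditional one. Schematically (someField is an illustrative name, not a field from this diff):

    // before: gated because a pre-6.7 peer might sit on the other end of the stream
    if (in.getVersion().onOrAfter(Version.V_6_7_0)) {
        someField = in.readOptionalString();
    } else {
        someField = null;
    }

    // after: master only talks to nodes on or after the gate, so read unconditionally
    someField = in.readOptionalString();

The hunks below apply exactly this collapse to serialization code, tests, and version-gated assertions.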
--- .../reindex/TransportUpdateByQueryAction.java | 10 ++---- .../main/java/org/elasticsearch/Build.java | 7 +--- .../elasticsearch/ElasticsearchException.java | 8 ++--- .../main/java/org/elasticsearch/Version.java | 12 ------- .../index/engine/ReadOnlyEngine.java | 2 +- .../recovery/RecoverySourceHandler.java | 4 +-- .../java/org/elasticsearch/BuildTests.java | 31 +---------------- .../ExceptionSerializationTests.java | 2 +- .../java/org/elasticsearch/VersionTests.java | 1 - .../action/shard/ShardStateActionTests.java | 6 +--- .../cluster/block/ClusterBlockTests.java | 2 +- .../xpack/core/ccr/AutoFollowStats.java | 25 +++++--------- .../action/PutAutoFollowPatternAction.java | 33 ++----------------- .../core/ccr/action/PutFollowAction.java | 9 ++--- .../deprecation/DeprecationInfoAction.java | 12 ++----- .../xpack/core/ml/MlMetadata.java | 20 +++-------- .../security/authc/TokenServiceTests.java | 12 +++---- 17 files changed, 37 insertions(+), 159 deletions(-) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 5ea175d11a7cb..410ae1b51116d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; @@ -82,18 +81,13 @@ protected void doExecute(Task task, UpdateByQueryRequest request, ActionListener */ static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction { - private final boolean useSeqNoForCAS; - AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, TransportUpdateByQueryAction action, UpdateByQueryRequest request, ClusterState clusterState, ActionListener listener) { super(task, - // not all nodes support sequence number powered optimistic concurrency control, we fall back to version - clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0) == false, - // all nodes support sequence number powered optimistic concurrency control and we can use it - clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0), + // use sequence number powered optimistic concurrency control + false, true, logger, client, threadPool, action, request, listener); - useSeqNoForCAS = clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0); } @Override diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 1b1cd8d3e720a..1a1ee2744f77a 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -254,12 +254,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.flavor().displayName()); } if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - final Type buildType; - if (out.getVersion().before(Version.V_6_7_0) && build.type() == Type.DOCKER) { - buildType = Type.TAR; - } else { - buildType = build.type(); - } + final Type buildType = build.type(); out.writeString(buildType.displayName()); } out.writeString(build.shortHash()); diff 
--git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index a2e53a1189f1b..260b443a6a557 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1010,19 +1010,19 @@ private enum ElasticsearchExceptionHandle { COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0), SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, - org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_6_7_0), + org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, UNKNOWN_VERSION_ADDED), NO_SUCH_REMOTE_CLUSTER_EXCEPTION(org.elasticsearch.transport.NoSuchRemoteClusterException.class, - org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_6_7_0), + org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, UNKNOWN_VERSION_ADDED), RETENTION_LEASE_ALREADY_EXISTS_EXCEPTION( org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException.class, org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException::new, 153, - Version.V_6_7_0), + UNKNOWN_VERSION_ADDED), RETENTION_LEASE_NOT_FOUND_EXCEPTION( org.elasticsearch.index.seqno.RetentionLeaseNotFoundException.class, org.elasticsearch.index.seqno.RetentionLeaseNotFoundException::new, 154, - Version.V_6_7_0), + UNKNOWN_VERSION_ADDED), SHARD_NOT_IN_PRIMARY_MODE_EXCEPTION( org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 90b7ae869e811..5089a7fe0cec9 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -78,12 +78,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_6_2_ID = 6060299; public static final Version V_6_6_2 = new Version(V_6_6_2_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); - public static final int V_6_7_0_ID = 6070099; - public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_1_ID = 6070199; - public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_2_ID = 6070299; - public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_1_ID = 6080199; @@ -130,12 +124,6 @@ public static Version fromId(int id) { return V_6_8_1; case V_6_8_0_ID: return V_6_8_0; - case V_6_7_1_ID: - return V_6_7_1; - case V_6_7_2_ID: - return V_6_7_2; - case V_6_7_0_ID: - return V_6_7_0; case V_6_6_2_ID: return V_6_6_2; case V_6_6_1_ID: diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 
b981bdb8a8421..e7e0c4d927851 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -131,7 +131,7 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat // that guarantee that all operations have been flushed to Lucene. final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated(); if (indexVersionCreated.onOrAfter(Version.V_7_2_0) || - (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) { + (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) { if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo() + "] from last commit does not match global checkpoint [" + seqNoStats.getGlobalCheckpoint() + "]"); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index aad460b821e62..4e82798e34128 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -30,7 +30,6 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -112,8 +111,7 @@ public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recov this.shardId = this.request.shardId().id(); this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; - // if the target is on an old version, it won't be able to handle out-of-order file chunks. - this.maxConcurrentFileChunks = request.targetNode().getVersion().onOrAfter(Version.V_6_7_0) ? 
maxConcurrentFileChunks : 1; + this.maxConcurrentFileChunks = maxConcurrentFileChunks; } public StartRecoveryRequest getRequest() { diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index 59e289b9e98ef..1945c51d1514f 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -30,7 +30,6 @@ import java.io.InputStream; import java.net.URL; import java.util.Arrays; -import java.util.List; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -193,35 +192,7 @@ public void testSerialization() { throw new AssertionError(); }); } - - public void testSerializationBWC() throws IOException { - final WriteableBuild dockerBuild = new WriteableBuild(new Build(randomFrom(Build.Flavor.values()), Build.Type.DOCKER, - randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))); - - final List versions = Version.getDeclaredVersions(Version.class); - final Version post63Pre67Version = randomFrom(versions.stream() - .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList())); - final Version post67Pre70Version = randomFrom(versions.stream() - .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList())); - final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList())); - - final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version); - final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version); - final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); - - assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor())); - assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor())); - - assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR)); - assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type())); - assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - - assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString())); - assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); - assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); - } - + public void testFlavorParsing() { for (final Build.Flavor flavor : Build.Flavor.values()) { // strict or not should not impact parsing at all here diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 5b33068013965..61d8532b5652a 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -888,7 +888,7 @@ public void testShardLockObtainFailedException() throws IOException { public void testSnapshotInProgressException() throws IOException { SnapshotInProgressException orig = new SnapshotInProgressException("boom"); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_7_0, Version.CURRENT); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); 
SnapshotInProgressException ex = serialize(orig, version); assertEquals(orig.getMessage(), ex.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 21a18e4a26ba5..66d7af0a4b20e 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -346,7 +346,6 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); assertFalse(isCompatible(Version.V_6_6_0, Version.V_7_0_0)); - assertFalse(isCompatible(Version.V_6_7_0, Version.V_7_0_0)); assertTrue(isCompatible(Version.V_6_8_0, Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index bcd080e0b45fd..7eb995e04f3fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -519,11 +519,7 @@ public void testStartedShardEntrySerialization() throws Exception { final StartedShardEntry deserialized = new StartedShardEntry(in); assertThat(deserialized.shardId, equalTo(shardId)); assertThat(deserialized.allocationId, equalTo(allocationId)); - if (version.onOrAfter(Version.V_6_7_0)) { - assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); - } else { - assertThat(deserialized.primaryTerm, equalTo(0L)); - } + assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); assertThat(deserialized.message, equalTo(message)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index 51a34d94b3a05..8d68684c9c368 100644 --- a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -130,7 +130,7 @@ private ClusterBlock randomClusterBlock() { } private ClusterBlock randomClusterBlock(final Version version) { - final String uuid = (version.onOrAfter(Version.V_6_7_0) && randomBoolean()) ? UUIDs.randomBase64UUID() : null; + final String uuid = randomBoolean() ? 
UUIDs.randomBase64UUID() : null; final List levels = Arrays.asList(ClusterBlockLevel.values()); return new ClusterBlock(randomInt(), uuid, "cluster block #" + randomInt(), randomBoolean(), randomBoolean(), randomBoolean(), randomFrom(RestStatus.values()), copyOf(randomSubsetOf(randomIntBetween(1, levels.size()), levels))); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java index 600bd5fced3ae..6c605ede85e24 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java @@ -116,15 +116,9 @@ public AutoFollowStats(StreamInput in) throws IOException { numberOfFailedFollowIndices = in.readVLong(); numberOfFailedRemoteClusterStateRequests = in.readVLong(); numberOfSuccessfulFollowIndices = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help - recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, - (Writeable.Reader>) in1 -> new Tuple<>(in1.readZLong(), in1.readException()))); - } else { - // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help - recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, - (Writeable.Reader>) in1 -> new Tuple<>(-1L, in1.readException()))); - } + // note: the casts to the following Writeable.Reader instances are needed by some IDEs (e.g. Eclipse 4.8) as a compiler help + recentAutoFollowErrors = new TreeMap<>(in.readMap((Writeable.Reader) StreamInput::readString, + (Writeable.Reader>) in1 -> new Tuple<>(in1.readZLong(), in1.readException()))); if (in.getVersion().onOrAfter(Version.V_6_6_0)) { autoFollowedClusters = new TreeMap<>(in.readMap(StreamInput::readString, AutoFollowedCluster::new)); } else { @@ -137,14 +131,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(numberOfFailedFollowIndices); out.writeVLong(numberOfFailedRemoteClusterStateRequests); out.writeVLong(numberOfSuccessfulFollowIndices); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> { - out1.writeZLong(value.v1()); - out1.writeException(value.v2()); - }); - } else { - out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> out1.writeException(value.v2())); - } + out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> { + out1.writeZLong(value.v1()); + out1.writeException(value.v2()); + }); + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeMap(autoFollowedClusters, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index ec946ce51e821..f26e8d7f82a51 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -5,14 +5,12 @@ */ package org.elasticsearch.xpack.core.ccr.action; 
-import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -153,21 +151,7 @@ public Request(StreamInput in) throws IOException { remoteCluster = in.readString(); leaderIndexPatterns = in.readStringList(); followIndexNamePattern = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - parameters = new FollowParameters(in); - } else { - parameters = new FollowParameters(); - parameters.maxReadRequestOperationCount = in.readOptionalVInt(); - parameters.maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - parameters.maxOutstandingReadRequests = in.readOptionalVInt(); - parameters.maxWriteRequestOperationCount = in.readOptionalVInt(); - parameters.maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - parameters.maxOutstandingWriteRequests = in.readOptionalVInt(); - parameters.maxWriteBufferCount = in.readOptionalVInt(); - parameters.maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); - parameters.maxRetryDelay = in.readOptionalTimeValue(); - parameters.readPollTimeout = in.readOptionalTimeValue(); - } + parameters = new FollowParameters(in); } @Override @@ -177,20 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - parameters.writeTo(out); - } else { - out.writeOptionalVInt(parameters.maxReadRequestOperationCount); - out.writeOptionalWriteable(parameters.maxReadRequestSize); - out.writeOptionalVInt(parameters.maxOutstandingReadRequests); - out.writeOptionalVInt(parameters.maxWriteRequestOperationCount); - out.writeOptionalWriteable(parameters.maxWriteRequestSize); - out.writeOptionalVInt(parameters.maxOutstandingWriteRequests); - out.writeOptionalVInt(parameters.maxWriteBufferCount); - out.writeOptionalWriteable(parameters.maxWriteBufferSize); - out.writeOptionalTimeValue(parameters.maxRetryDelay); - out.writeOptionalTimeValue(parameters.readPollTimeout); - } + parameters.writeTo(out); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 89c18a9824ab4..4d20e6d820de2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -169,9 +168,7 @@ public Request(StreamInput in) throws IOException { this.leaderIndex = in.readString(); this.followerIndex = in.readString(); this.parameters = new FollowParameters(in); - if 
(in.getVersion().onOrAfter(Version.V_6_7_0)) { - waitForActiveShards(ActiveShardCount.readFrom(in)); - } + waitForActiveShards(ActiveShardCount.readFrom(in)); } @Override @@ -181,9 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(leaderIndex); out.writeString(followerIndex); parameters.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - waitForActiveShards.writeTo(out); - } + waitForActiveShards.writeTo(out); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java index 28aa09f6c1efb..54d260e32532f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.deprecation; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -29,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -125,11 +123,7 @@ public void readFrom(StreamInput in) throws IOException { clusterSettingsIssues = in.readList(DeprecationIssue::new); nodeSettingsIssues = in.readList(DeprecationIssue::new); indexSettingsIssues = in.readMapOfLists(StreamInput::readString, DeprecationIssue::new); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - mlSettingsIssues = in.readList(DeprecationIssue::new); - } else { - mlSettingsIssues = Collections.emptyList(); - } + mlSettingsIssues = in.readList(DeprecationIssue::new); } @Override @@ -138,9 +132,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(clusterSettingsIssues); out.writeList(nodeSettingsIssues); out.writeMapOfLists(indexSettingsIssues, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeList(mlSettingsIssues); - } + out.writeList(mlSettingsIssues); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 0f502577195dd..dfe5560da3303 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -138,20 +138,14 @@ public MlMetadata(StreamInput in) throws IOException { } this.datafeeds = datafeeds; this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - this.upgradeMode = in.readBoolean(); - } else { - this.upgradeMode = false; - } + this.upgradeMode = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { writeMap(jobs, out); writeMap(datafeeds, out); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeBoolean(upgradeMode); - } + out.writeBoolean(upgradeMode); } private static void writeMap(Map map, StreamOutput out) throws IOException { @@ -202,11 +196,7 @@ public MlMetadataDiff(StreamInput in) throws IOException { MlMetadataDiff::readJobDiffFrom); this.datafeeds = DiffableUtils.readJdkMapDiff(in, 
DiffableUtils.getStringKeySerializer(), DatafeedConfig::new, MlMetadataDiff::readDatafeedDiffFrom); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - upgradeMode = in.readBoolean(); - } else { - upgradeMode = false; - } + upgradeMode = in.readBoolean(); } /** @@ -225,9 +215,7 @@ public MetaData.Custom apply(MetaData.Custom part) { public void writeTo(StreamOutput out) throws IOException { jobs.writeTo(out); datafeeds.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeBoolean(upgradeMode); - } + out.writeBoolean(upgradeMode); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 42101b1f4ec97..49796333098ff 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -137,7 +137,7 @@ public void setupClient() { // tokens docs on a separate index), let's test the TokenService works in a mixed cluster with nodes with versions prior to these // developments if (randomBoolean()) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_0_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } } @@ -211,7 +211,7 @@ public void testRotateKey() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -272,7 +272,7 @@ public void testKeyExchange() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { @@ -314,7 +314,7 @@ public void testPruneKeys() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -384,7 +384,7 @@ public void testPassphraseWorks() throws Exception { TokenService 
tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -418,7 +418,7 @@ public void testGetTokenWhenKeyCacheHasExpired() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used if (null == oldNode) { - oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_7_0_0, Version.V_7_1_0)); } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); From f00716389b974a81317f04fd2d693f05e0965007 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 08:20:25 +0300 Subject: [PATCH 173/321] Allow Kibana user to use the OpenID Connect APIs (#42305) Add the manage_oidc privilege to the kibana user and to the role privileges list --- .../elasticsearch/client/security/user/privileges/Role.java | 3 ++- .../xpack/core/security/authz/store/ReservedRolesStore.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java index c6dc6910d97b0..a3263e7f6e920 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java @@ -311,6 +311,7 @@ public static class ClusterPrivilegeName { public static final String TRANSPORT_CLIENT = "transport_client"; public static final String MANAGE_SECURITY = "manage_security"; public static final String MANAGE_SAML = "manage_saml"; + public static final String MANAGE_OIDC = "manage_oidc"; public static final String MANAGE_TOKEN = "manage_token"; public static final String MANAGE_PIPELINE = "manage_pipeline"; public static final String MANAGE_CCR = "manage_ccr"; @@ -319,7 +320,7 @@ public static class ClusterPrivilegeName { public static final String READ_ILM = "read_ilm"; public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, - MANAGE_SECURITY, MANAGE_SAML, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM }; + MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM}; } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index b767b56086159..2c86971b529f9 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -109,7 +109,7 @@ private static Map initializeReservedRoles() { null)) .put(KibanaUser.ROLE_NAME, new RoleDescriptor(KibanaUser.ROLE_NAME, new String[] { - "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token" + "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token", "manage_oidc" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() From 18bff0c76417643ce8ceb7b77042d301dfda00b8 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 08:20:51 +0300 Subject: [PATCH 174/321] Ensure SHA256 is not used in tests (#42289) SHA256 was recently added to the Hasher class in order to be used in the TokenService. A few tests were still using values() to get the available algorithms from the Enum and it could happen that SHA256 would be picked up by these. This change adds an extra convenience method (Hasher#getAvailableAlgoCacheHash) and ensures that only this and Hasher#getAvailableAlgoStoredHash are used for getting the list of available password hashing algorithms in our tests. --- x-pack/plugin/core/build.gradle | 4 ++++ x-pack/plugin/core/forbidden/hasher-signatures.txt | 2 ++ .../xpack/core/security/authc/support/Hasher.java | 14 ++++++++++++++ .../xpack/security/authc/RealmSettingsTests.java | 4 +--- .../xpack/security/authc/file/FileRealmTests.java | 3 +-- .../support/CachingUsernamePasswordRealmTests.java | 3 +-- 6 files changed, 23 insertions(+), 7 deletions(-) create mode 100644 x-pack/plugin/core/forbidden/hasher-signatures.txt diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index c20449724f8e0..d805a491e093a 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -95,6 +95,10 @@ forbiddenPatterns { exclude '**/*.zip' } +forbiddenApisMain { + signaturesFiles += files('forbidden/hasher-signatures.txt') +} + if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { diff --git a/x-pack/plugin/core/forbidden/hasher-signatures.txt b/x-pack/plugin/core/forbidden/hasher-signatures.txt new file mode 100644 index 0000000000000..98271161096a7 --- /dev/null +++ b/x-pack/plugin/core/forbidden/hasher-signatures.txt @@ -0,0 +1,2 @@ +@defaultMessage values should not be used as it can contain unwanted algorithms. 
Use Hasher#getAvailableAlgoStoredHash and Hasher#getAvailableAlgoCacheHash instead +org.elasticsearch.xpack.core.security.authc.support.Hasher#values() \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index 28f263748135f..5413a38bd6288 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -7,6 +7,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.SecureString; @@ -565,12 +566,25 @@ private static boolean verifyBcryptHash(SecureString text, char[] hash) { * combinations that can be used for password hashing. The identifiers can be used to get * an instance of the appropriate {@link Hasher} by using {@link #resolve(String) resolve()} */ + @SuppressForbidden(reason = "This is the only allowed way to get available values") public static List getAvailableAlgoStoredHash() { return Arrays.stream(Hasher.values()).map(Hasher::name).map(name -> name.toLowerCase(Locale.ROOT)) .filter(name -> (name.startsWith("pbkdf2") || name.startsWith("bcrypt"))) .collect(Collectors.toList()); } + /** + * Returns a list of lower case String identifiers for the Hashing algorithm and parameter + * combinations that can be used for password hashing in the cache. The identifiers can be used to get + * an instance of the appropriate {@link Hasher} by using {@link #resolve(String) resolve()} + */ + @SuppressForbidden(reason = "This is the only allowed way to get available values") + public static List getAvailableAlgoCacheHash() { + return Arrays.stream(Hasher.values()).map(Hasher::name).map(name -> name.toLowerCase(Locale.ROOT)) + .filter(name -> (name.equals("sha256") == false)) + .collect(Collectors.toList()); + } + public abstract char[] hash(SecureString data); public abstract boolean verify(SecureString data, char[] hash); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java index 7d7fd135349b1..eb33408f338c6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java @@ -17,18 +17,16 @@ import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.notNullValue; public class RealmSettingsTests extends ESTestCase { - private static final List CACHE_HASHING_ALGOS = Arrays.stream(Hasher.values()).map(Hasher::name).collect(Collectors.toList()); + private static final List CACHE_HASHING_ALGOS = Hasher.getAvailableAlgoCacheHash(); public void testRealmWithBlankTypeDoesNotValidate() throws Exception { final Settings.Builder builder = baseSettings(false); 
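Taken together, the test changes in this commit reduce to the following pattern; a minimal sketch, assuming an ESTestCase subclass so that randomFrom is in scope:

    // stored password hashes: restricted to the pbkdf2* and bcrypt* variants
    String storedHashAlgo = randomFrom(Hasher.getAvailableAlgoStoredHash());
    // cache hashes: any available algorithm except the TokenService-only sha256
    String cacheHashAlgo = randomFrom(Hasher.getAvailableAlgoCacheHash());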
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index 168f608951e09..67ab33bac7380 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -22,7 +22,6 @@ import org.junit.Before; import org.mockito.stubbing.Answer; -import java.util.Locale; import java.util.Map; import java.util.function.Supplier; @@ -94,7 +93,7 @@ private RealmConfig getRealmConfig(Settings settings) { public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo", - Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)) + randomFrom(Hasher.getAvailableAlgoCacheHash())) .put(globalSettings) .build(); RealmConfig config = getRealmConfig(settings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 8b30cb85fed78..49f0d45966639 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -30,7 +30,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -66,7 +65,7 @@ public void stop() { @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testCacheSettings() { - String cachingHashAlgo = Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT); + String cachingHashAlgo = randomFrom(Hasher.getAvailableAlgoCacheHash()); int maxUsers = randomIntBetween(10, 100); TimeValue ttl = TimeValue.timeValueMinutes(randomIntBetween(10, 20)); final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("caching", "test_realm"); From db0fbf01cb19034060a148583f525d1a3b4ced9f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 07:36:44 +0200 Subject: [PATCH 175/321] Add Package Level Documentation to o.e.r.blobstore (#42101) * Add Package Level Documentation to o.e.r.blobstore * Added verbose documentation for the `o.e.r.blobstore` package similar to that added for the snapshot package in https://github.com/elastic/elasticsearch/pull/38108 * Moved the documentation on the BlobStoreRepository to the package level to have things in a single place for easier readability. 
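The generation lookup that this package documentation describes can be made concrete with a short sketch. The sketch below is illustrative only and not part of this commit; it assumes the 7.x {@code BlobContainer} methods listBlobsByPrefix and readBlob, and that index.latest stores the generation as a 64bit long.

    import java.io.DataInputStream;
    import java.io.IOException;
    import org.elasticsearch.common.blobstore.BlobContainer;

    final class LatestGenerationSketch {
        // Resolve the current repository generation N: prefer listing the index-N blobs
        // and taking the highest N, mirroring the steps under "Getting the Repository's
        // RepositoryData" in the package docs added by this commit.
        static long latestGeneration(BlobContainer repositoryRoot) throws IOException {
            try {
                return repositoryRoot.listBlobsByPrefix("index-").keySet().stream()
                    .map(name -> name.substring("index-".length()))
                    .mapToLong(Long::parseLong)
                    .max()
                    .orElse(-1L); // no index-N blob at all: treat the repository as empty
            } catch (UnsupportedOperationException e) {
                // read-only repositories may not support LIST; fall back to index.latest
                try (DataInputStream in = new DataInputStream(repositoryRoot.readBlob("index.latest"))) {
                    return in.readLong(); // the blob holds N as a 64bit long
                }
            }
        }
    }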
--- .../blobstore/BlobStoreRepository.java | 42 +--- .../repositories/blobstore/package-info.java | 208 ++++++++++++++++++ 2 files changed, 211 insertions(+), 39 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 5ed73a0058cc5..320b7ff2d5550 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -121,45 +121,9 @@ *

 * This repository works with any {@link BlobStore} implementation. The blobStore could be (and preferred) lazy initialized in
 * {@link #createBlobStore()}.
- * <p>
- * BlobStoreRepository maintains the following structure in the blob store
- * <pre>
- * {@code
- *   STORE_ROOT
- *   |- index-N           - JSON serialized {@link RepositoryData} containing a list of all snapshot ids and the indices belonging to
- *   |                      each snapshot, N is the generation of the file
- *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
- *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
- *   |- snap-20131010.dat - SMILE serialized {@link SnapshotInfo} for snapshot "20131010"
- *   |- meta-20131010.dat - SMILE serialized {@link MetaData} for snapshot "20131010" (includes only global metadata)
- *   |- snap-20131011.dat - SMILE serialized {@link SnapshotInfo} for snapshot "20131011"
- *   |- meta-20131011.dat - SMILE serialized {@link MetaData} for snapshot "20131011"
- *   .....
- *   |- indices/ - data for all indices
- *      |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id of Ac1342-B_x in the repository
- *      |  |- meta-20131010.dat - JSON Serialized {@link IndexMetaData} for index "foo"
- *      |  |- 0/ - data for shard "0" of index "foo"
- *      |  |  |- __1                      \  (files with numeric names were created by older ES versions)
- *      |  |  |- __2                      |
- *      |  |  |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
- *      |  |  |- __1gbJy18wS_2kv1qI7FgKuQ |
- *      |  |  |- __R8JvZAHlSMyMXyZc2SS8Zg /
- *      |  |  .....
- *      |  |  |- snap-20131010.dat - SMILE serialized {@link BlobStoreIndexShardSnapshot} for snapshot "20131010"
- *      |  |  |- snap-20131011.dat - SMILE serialized {@link BlobStoreIndexShardSnapshot} for snapshot "20131011"
- *      |  |  |- index-123 - SMILE serialized {@link BlobStoreIndexShardSnapshots} for the shard
- *      |  |
- *      |  |- 1/ - data for shard "1" of index "foo"
- *      |  |  |- __1
- *      |  |  .....
- *      |  |
- *      |  |-2/
- *      |  ......
- *      |
- *      |- 1xB0D8_B3y/ - data for index "bar" which was assigned the unique id of 1xB0D8_B3y in the repository
- *      ......
- * }
- * </pre>
+ * <p>
+ * For in depth documentation on how exactly implementations of this class interact with the snapshot functionality please refer to the
+ * documentation of the package {@link org.elasticsearch.repositories.blobstore}.
  */
 public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository {
     private static final Logger logger = LogManager.getLogger(BlobStoreRepository.class);
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java
new file mode 100644
index 0000000000000..9d6d72f0458c9
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * <p>This package exposes the blobstore repository used by Elasticsearch Snapshots.</p>
+ *
+ * <h1>Preliminaries</h1>
+ *
+ * <p>The {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} forms the basis of implementations of
+ * {@link org.elasticsearch.repositories.Repository} on top of a blob store. A blobstore can be used as the basis for an implementation
+ * as long as it provides for GET, PUT, DELETE, and LIST operations. For a read-only repository, it suffices if the blobstore provides only
+ * GET operations.
+ * These operations are formally defined as specified by the {@link org.elasticsearch.common.blobstore.BlobContainer} interface that
+ * any {@code BlobStoreRepository} implementation must provide via its implementation of
+ * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.</p>
+ *
+ * <p>The blob store is written to and read from by master-eligible nodes and data nodes. All metadata related to a snapshot's
+ * scope and health is written by the master node.</p>
+ * <p>The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for
+ * shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of
+ * the shard's segment files to the repository as well as metadata about all the segment files that the repository stores for the shard.</p>
+ *
+ * <p>For the specifics on how the operations on the repository documented below are invoked during the snapshot process please refer to
+ * the documentation of the {@link org.elasticsearch.snapshots} package.</p>
+ *
+ * <p>{@code BlobStoreRepository} maintains the following structure of blobs containing data and metadata in the blob store. The exact
+ * operations executed on these blobs are explained below.</p>
+ * <pre>
+ * {@code
+ *   STORE_ROOT
+ *   |- index-N           - JSON serialized {@link org.elasticsearch.repositories.RepositoryData} containing a list of all snapshot ids
+ *   |                      and the indices belonging to each snapshot, N is the generation of the file
+ *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
+ *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
+ *   |- snap-20131010.dat - SMILE serialized {@link org.elasticsearch.snapshots.SnapshotInfo} for snapshot "20131010"
+ *   |- meta-20131010.dat - SMILE serialized {@link org.elasticsearch.cluster.metadata.MetaData} for snapshot "20131010"
+ *   |                      (includes only global metadata)
+ *   |- snap-20131011.dat - SMILE serialized {@link org.elasticsearch.snapshots.SnapshotInfo} for snapshot "20131011"
+ *   |- meta-20131011.dat - SMILE serialized {@link org.elasticsearch.cluster.metadata.MetaData} for snapshot "20131011"
+ *   .....
+ *   |- indices/ - data for all indices
+ *      |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id Ac1342-B_x (not to be confused with the actual index uuid)
+ *      |  |             in the repository
+ *      |  |- meta-20131010.dat - JSON Serialized {@link org.elasticsearch.cluster.metadata.IndexMetaData} for index "foo"
+ *      |  |- 0/ - data for shard "0" of index "foo"
+ *      |  |  |- __1                      \  (files with numeric names were created by older ES versions)
+ *      |  |  |- __2                      |
+ *      |  |  |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
+ *      |  |  |- __1gbJy18wS_2kv1qI7FgKuQ |
+ *      |  |  |- __R8JvZAHlSMyMXyZc2SS8Zg /
+ *      |  |  .....
+ *      |  |  |- snap-20131010.dat - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
+ *      |  |  |                      snapshot "20131010"
+ *      |  |  |- snap-20131011.dat - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
+ *      |  |  |                      snapshot "20131011"
+ *      |  |  |- index-123         - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} for
+ *      |  |  |                      the shard
+ *      |  |
+ *      |  |- 1/ - data for shard "1" of index "foo"
+ *      |  |  |- __1
+ *      |  |  .....
+ *      |  |
+ *      |  |-2/
+ *      |  ......
+ *      |
+ *      |- 1xB0D8_B3y/ - data for index "bar" which was assigned the unique id of 1xB0D8_B3y in the repository
+ *      ......
+ * }
+ * </pre>
+ *
+ * <h1>Getting the Repository's RepositoryData</h1>
+ *
+ * <p>Loading the {@link org.elasticsearch.repositories.RepositoryData} that holds the list of all snapshots as well as the mapping of
+ * indices' names to their repository {@link org.elasticsearch.repositories.IndexId} is done by invoking
+ * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#getRepositoryData} and implemented as follows:</p>
+ * <ol>
+ * <li>
+ * <ol>
+ * <li>The blobstore repository stores the {@code RepositoryData} in blobs named with incrementing suffix {@code N} at {@code /index-N}
+ * directly under the repository's root.</li>
+ * <li>The blobstore also stores the most recent {@code N} as a 64bit long in the blob {@code /index.latest} directly under the
+ * repository's root.</li>
+ * </ol>
+ * </li>
+ * <li>
+ * <ol>
+ * <li>First, find the most recent {@code RepositoryData} by getting a list of all index-N blobs through listing all blobs with prefix
+ * "index-" under the repository root and then selecting the one with the highest value for N.</li>
+ * <li>If this operation fails because the repository's {@code BlobContainer} does not support list operations (in the case of read-only
+ * repositories), read the highest value of N from the index.latest blob.</li>
+ * </ol>
+ * </li>
+ * <li>
+ * <ol>
+ * <li>Use the just determined value of {@code N} and get the {@code /index-N} blob and deserialize the {@code RepositoryData} from it.</li>
+ * <li>If no value of {@code N} could be found since neither an {@code index.latest} nor any {@code index-N} blobs exist in the repository,
+ * it is assumed to be empty and {@link org.elasticsearch.repositories.RepositoryData#EMPTY} is returned.</li>
+ * </ol>
+ * </li>
+ * </ol>
+ *
+ * <h1>Creating a Snapshot</h1>
+ *
+ * <p>Creating a snapshot in the repository happens in the three steps described in detail below.</p>
+ *
+ * <h2>Initializing a Snapshot in the Repository</h2>
+ *
+ * <p>Creating a snapshot in the repository starts with a call to {@link org.elasticsearch.repositories.Repository#initializeSnapshot} which
+ * the blob store repository implements via the following actions:</p>
+ * <ol>
+ * <li>Verify that no snapshot by the requested name exists.</li>
+ * <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
+ * <li>Write the metadata for each index to a blob in that index's directory at
+ * {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
+ * </ol>
+ * TODO: This behavior is problematic, adjust these docs once https://github.com/elastic/elasticsearch/issues/41581 is fixed
+ *
+ * <h2>Writing Shard Data (Segments)</h2>
+ *
+ * <p>Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data
+ * to the repository by invoking {@link org.elasticsearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries
+ * for the shards in the current snapshot. It is implemented as follows:</p>
+ *
+ * <p>Note:</p>
+ * <ul>
+ * <li>For each shard {@code i} in a given index, its path in the blob store is located at {@code /indices/${index-snapshot-uuid}/${i}}</li>
+ * <li>All the following steps are executed exclusively on the shard's primary's data node.</li>
+ * </ul>
+ *
+ * <ol>
+ * <li>Create the {@link org.apache.lucene.index.IndexCommit} for the shard to snapshot.</li>
+ * <li>List all blobs in the shard's path. Find the {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} blob
+ * with name {@code index-${N}} for the highest possible value of {@code N} in the list to get the information of what segment files are
+ * already available in the blobstore.</li>
+ * <li>By comparing the files in the {@code IndexCommit} and the available file list from the previous step, determine the segment files
+ * that need to be written to the blob store. For each segment that needs to be added to the blob store, generate a unique name by combining
+ * the segment data blob prefix {@code __} and a UUID and write the segment to the blobstore.</li>
+ * <li>After completing all segment writes, a blob containing a
+ * {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} with name {@code snap-${snapshot-uuid}.dat} is written to
+ * the shard's path and contains a list of all the files referenced by the snapshot as well as some metadata about the snapshot. See the
+ * documentation of {@code BlobStoreIndexShardSnapshot} for details on its contents.</li>
+ * <li>Once all the segments and the {@code BlobStoreIndexShardSnapshot} blob have been written, an updated
+ * {@code BlobStoreIndexShardSnapshots} blob is written to the shard's path with name {@code index-${N+1}}.</li>
+ * </ol>
+ *
+ * <h2>Finalizing the Snapshot</h2>
+ *
+ * <p>After all primaries have finished writing the necessary segment files to the blob store in the previous step, the master node moves on
+ * to finalizing the snapshot by invoking {@link org.elasticsearch.repositories.Repository#finalizeSnapshot}. This method executes the
+ * following actions in order:</p>
+ * <ol>
+ * <li>Write the {@link org.elasticsearch.snapshots.SnapshotInfo} blob for the given snapshot to the key {@code /snap-${snapshot-uuid}.dat}
+ * directly under the repository root.</li>
+ * <li>Write an updated {@code RepositoryData} blob to the key {@code /index-${N+1}} using the {@code N} determined when initializing the
+ * snapshot in the first step. When doing this, the implementation checks that the blob for generation {@code N + 1} has not yet been
+ * written to prevent concurrent updates to the repository. If the blob for {@code N + 1} already exists the execution of finalization
+ * stops under the assumption that a master failover occurred and the snapshot has already been finalized by the new master.</li>
+ * <li>Write the updated {@code /index.latest} blob containing the new repository generation {@code N + 1}.</li>
+ * </ol>
+ *
+ * <h1>Deleting a Snapshot</h1>
+ *
+ * <p>Deleting a snapshot is an operation that is exclusively executed on the master node that runs through the following sequence of
+ * actions when {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#deleteSnapshot} is invoked:</p>
+ *
+ * <ol>
+ * <li>Get the current {@code RepositoryData} from the latest {@code index-N} blob at the repository root.</li>
+ * <li>Write an updated {@code RepositoryData} blob with the deleted snapshot removed to key {@code /index-${N+1}} directly under the
+ * repository root.</li>
+ * <li>Write an updated {@code index.latest} blob containing {@code N + 1}.</li>
+ * <li>Delete the global {@code MetaData} blob {@code meta-${snapshot-uuid}.dat} stored directly under the repository root for the snapshot
+ * as well as the {@code SnapshotInfo} blob at {@code /snap-${snapshot-uuid}.dat}.</li>
+ * <li>For each index referenced by the snapshot:
+ * <ol>
+ * <li>Delete the snapshot's {@code IndexMetaData} at {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}}.</li>
+ * <li>Go through all shard directories {@code /indices/${index-snapshot-uuid}/${i}} and:
+ * <ol>
+ * <li>Remove the {@code BlobStoreIndexShardSnapshot} blob at {@code /indices/${index-snapshot-uuid}/${i}/snap-${snapshot-uuid}.dat}.</li>
+ * <li>List all blobs in the shard path {@code /indices/${index-snapshot-uuid}} and build a new {@code BlobStoreIndexShardSnapshots} from
+ * the remaining {@code BlobStoreIndexShardSnapshot} blobs in the shard. Afterwards, write it to the next shard generation blob at
+ * {@code /indices/${index-snapshot-uuid}/${i}/index-${N+1}} (The shard's generation is determined from the list of {@code index-N} blobs
+ * in the shard directory).</li>
+ * <li>Delete all segment blobs (identified by having the data blob prefix {@code __}) in the shard directory which are not referenced by
+ * the new {@code BlobStoreIndexShardSnapshots} that has been written in the previous step.</li>
+ * </ol>
+ * </li>
+ * </ol>
+ * </li>
+ * </ol>
+ * TODO: The above sequence of actions can lead to leaking files when an index completely goes out of scope. Adjust this documentation once + * https://github.com/elastic/elasticsearch/issues/13159 is fixed. + */ +package org.elasticsearch.repositories.blobstore; From 9a152ee5fa058ed72952513ab78064dc71e6d32e Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 22 May 2019 08:12:58 +0200 Subject: [PATCH 176/321] move latch await to doNextSearch (#42275) move latch await to doNextSearch, fixes a race condition when the executor thread is faster than the coordinator thread fixes #42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 27fba82338a1c..4249d7c61d0ad 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -64,7 +64,7 @@ protected String getJobId() { @Override protected IterationResult doProcess(SearchResponse searchResponse) { - awaitForLatch(); + assertFalse("should not be called as stoppedBeforeFinished is false", stoppedBeforeFinished); assertThat(step, equalTo(3)); ++step; return new IterationResult<>(Collections.emptyList(), 3, true); @@ -99,6 +99,9 @@ protected void doNextSearch(SearchRequest request, ActionListener state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -265,7 +267,6 @@ public void testStateMachineBrokenSearch() throws InterruptedException { } } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_AfterIndexerIsFinished() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -285,7 +286,6 @@ public void testStop_AfterIndexerIsFinished() throws InterruptedException { } } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_WhileIndexing() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 610230f8ca34e4aeef95dcc92a0f4de5b9c33ba6 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 22 May 2019 08:13:32 +0200 Subject: [PATCH 177/321] [ML-DataFrame] validate group name to not contain invalid characters (#42292) disallows of creating groupBy field with '[', ']', '>' in the name to be consistent with aggregations --- .../transforms/pivot/GroupConfig.java | 8 +++++ .../transforms/pivot/GroupConfigTests.java | 32 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java index 807c2e8d339dd..532477c44bdf4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java @@ -20,6 +20,7 @@ import 
org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; @@ -29,6 +30,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.regex.Matcher; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -135,6 +137,7 @@ public static GroupConfig fromXContent(final XContentParser parser, boolean leni private static Map parseGroupConfig(final XContentParser parser, boolean lenient) throws IOException { + Matcher validAggMatcher = AggregatorFactories.VALID_AGG_NAME.matcher(""); LinkedHashMap groups = new LinkedHashMap<>(); // be parsing friendly, whether the token needs to be advanced or not (similar to what ObjectParser does) @@ -150,6 +153,11 @@ private static Map parseGroupConfig(final XContentPar ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); String destinationFieldName = parser.currentName(); + if (validAggMatcher.reset(destinationFieldName).matches() == false) { + throw new ParsingException(parser.getTokenLocation(), "Invalid group name [" + destinationFieldName + + "]. Group names can contain any character except '[', ']', and '>'"); + } + token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation); token = parser.nextToken(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java index f7b9552584221..11dfc55264a21 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.ToXContent; @@ -27,6 +28,9 @@ public class GroupConfigTests extends AbstractSerializingTestCase { + // array of illegal characters, see {@link AggregatorFactories#VALID_AGG_NAME} + private static final char[] ILLEGAL_FIELD_NAME_CHARACTERS = {'[', ']', '>'}; + public static GroupConfig randomGroupConfig() { Map source = new LinkedHashMap<>(); Map groups = new LinkedHashMap<>(); @@ -88,6 +92,34 @@ public void testEmptyGroupBy() throws IOException { } } + public void testInvalidGroupByNames() throws IOException { + + String invalidName = randomAlphaOfLengthBetween(0, 5) + + ILLEGAL_FIELD_NAME_CHARACTERS[randomIntBetween(0, ILLEGAL_FIELD_NAME_CHARACTERS.length - 1)] + + randomAlphaOfLengthBetween(0, 5); + + XContentBuilder source = JsonXContent.contentBuilder() + .startObject() + .startObject(invalidName) + .startObject("terms") + .field("field", "user") + .endObject() + .endObject() + .endObject(); + + // lenient, passes but reports invalid + try (XContentParser parser = createParser(source)) { + GroupConfig groupConfig = GroupConfig.fromXContent(parser, 
true); + assertFalse(groupConfig.isValid()); + } + + // strict throws + try (XContentParser parser = createParser(source)) { + Exception e = expectThrows(ParsingException.class, () -> GroupConfig.fromXContent(parser, false)); + assertTrue(e.getMessage().startsWith("Invalid group name")); + } + } + private static Map getSource(SingleGroupSource groupSource) { try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { XContentBuilder content = groupSource.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); From fccb7a2820147a76fdb69b9fc6817c6bd6aa5833 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 22 May 2019 10:32:30 +0300 Subject: [PATCH 178/321] [ML] Include node name when native controller cannot start process (#42225) This adds the node name where we fail to start a process via the native controller to facilitate debugging as otherwise it might not be known to which node the job was allocated. --- .../elasticsearch/xpack/ml/MachineLearning.java | 2 +- .../xpack/ml/MachineLearningFeatureSet.java | 3 ++- .../elasticsearch/xpack/ml/MlLifeCycleService.java | 4 +++- .../xpack/ml/process/NativeController.java | 14 +++++++++----- .../xpack/ml/process/NativeControllerHolder.java | 4 ++-- .../xpack/ml/process/NativeControllerTests.java | 11 +++++++---- 6 files changed, 24 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index de945b9bc6c3d..f679170bc673d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -437,7 +437,7 @@ public Collection createComponents(Client client, ClusterService cluster NormalizerProcessFactory normalizerProcessFactory; if (MachineLearningField.AUTODETECT_PROCESS.get(settings) && MachineLearningFeatureSet.isRunningOnMlPlatform(true)) { try { - NativeController nativeController = NativeControllerHolder.getNativeController(environment); + NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), environment); if (nativeController == null) { // This will only only happen when path.home is not set, which is disallowed in production throw new ElasticsearchException("Failed to create native process controller for Machine Learning"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index c913babbaa405..bcfab50c21e00 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -79,7 +79,8 @@ public MachineLearningFeatureSet(Environment environment, ClusterService cluster if (enabled && XPackPlugin.transportClientMode(environment.settings()) == false) { try { if (isRunningOnMlPlatform(true)) { - NativeController nativeController = NativeControllerHolder.getNativeController(environment); + NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), + environment); if (nativeController != null) { nativeCodeInfo = nativeController.getNativeCodeInfo(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index 06d9b749e1a89..7309afa6b3ab4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -19,6 +19,7 @@ public class MlLifeCycleService { private final Environment environment; + private final ClusterService clusterService; private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; private final MlMemoryTracker memoryTracker; @@ -26,6 +27,7 @@ public class MlLifeCycleService { public MlLifeCycleService(Environment environment, ClusterService clusterService, DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager, MlMemoryTracker memoryTracker) { this.environment = environment; + this.clusterService = clusterService; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; this.memoryTracker = memoryTracker; @@ -46,7 +48,7 @@ public synchronized void stop() { if (datafeedManager != null) { datafeedManager.isolateAllDatafeedsOnThisNodeBeforeShutdown(); } - NativeController nativeController = NativeControllerHolder.getNativeController(environment); + NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), environment); if (nativeController != null) { // This kills autodetect processes WITHOUT closing the jobs, so they get reallocated. if (autodetectProcessManager != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java index 2dc86825a1209..5dfa86ad22583 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java @@ -43,15 +43,17 @@ public class NativeController { public static final Map UNKNOWN_NATIVE_CODE_INFO = Map.of("version", "N/A", "build_hash", "N/A"); + private final String localNodeName; private final CppLogMessageHandler cppLogHandler; private final OutputStream commandStream; - NativeController(Environment env, NamedPipeHelper namedPipeHelper) throws IOException { + NativeController(String localNodeName, Environment env, NamedPipeHelper namedPipeHelper) throws IOException { ProcessPipes processPipes = new ProcessPipes(env, namedPipeHelper, CONTROLLER, null, true, true, false, false, false, false); processPipes.connectStreams(CONTROLLER_CONNECT_TIMEOUT); - cppLogHandler = new CppLogMessageHandler(null, processPipes.getLogStream().get()); - commandStream = new BufferedOutputStream(processPipes.getCommandStream().get()); + this.localNodeName = localNodeName; + this.cppLogHandler = new CppLogMessageHandler(null, processPipes.getLogStream().get()); + this.commandStream = new BufferedOutputStream(processPipes.getCommandStream().get()); } void tailLogsInThread() { @@ -98,7 +100,8 @@ public void startProcess(List command) throws IOException { } if (cppLogHandler.hasLogStreamEnded()) { - String msg = "Cannot start process [" + command.get(0) + "]: native controller process has stopped"; + String msg = "Cannot start process [" + command.get(0) + "]: native controller process has stopped on node [" + + localNodeName + "]"; LOGGER.error(msg); throw new ElasticsearchException(msg); } @@ -124,7 +127,8 @@ public void killProcess(long pid) 
throws TimeoutException, IOException { } if (cppLogHandler.hasLogStreamEnded()) { - String msg = "Cannot kill process with PID [" + pid + "]: native controller process has stopped"; + String msg = "Cannot kill process with PID [" + pid + "]: native controller process has stopped on node [" + + localNodeName + "]"; LOGGER.error(msg); throw new ElasticsearchException(msg); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java index 67e24b44a8494..5365a11f6b560 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java @@ -32,12 +32,12 @@ private NativeControllerHolder() { * * Calls may throw an exception if initial connection to the C++ process fails. */ - public static NativeController getNativeController(Environment environment) throws IOException { + public static NativeController getNativeController(String localNodeName, Environment environment) throws IOException { if (MachineLearningField.AUTODETECT_PROCESS.get(environment.settings())) { synchronized (lock) { if (nativeController == null) { - nativeController = new NativeController(environment, new NamedPipeHelper()); + nativeController = new NativeController(localNodeName, environment, new NamedPipeHelper()); nativeController.tailLogsInThread(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java index ac00e8a24e1cf..c799f14235920 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java @@ -30,6 +30,8 @@ public class NativeControllerTests extends ESTestCase { + private static final String NODE_NAME = "native-controller-tests-node"; + private static final String TEST_MESSAGE = "{\"logger\":\"controller\",\"timestamp\":1478261151445,\"level\":\"INFO\",\"pid\":10211," + "\"thread\":\"0x7fff7d2a8000\",\"message\":\"controller (64 bit): Version 6.0.0-alpha1-SNAPSHOT (Build a0d6ef8819418c) " + "Copyright (c) 2017 Elasticsearch BV\",\"method\":\"main\",\"file\":\"Main.cc\",\"line\":123}\n"; @@ -50,7 +52,7 @@ public void testStartProcessCommand() throws IOException { command.add("--arg2=42"); command.add("--arg3=something with spaces"); - NativeController nativeController = new NativeController(TestEnvironment.newEnvironment(settings), namedPipeHelper); + NativeController nativeController = new NativeController(NODE_NAME, TestEnvironment.newEnvironment(settings), namedPipeHelper); nativeController.startProcess(command); assertEquals("start\tmy_process\t--arg1\t--arg2=42\t--arg3=something with spaces\n", @@ -65,7 +67,7 @@ public void testGetNativeCodeInfo() throws IOException, TimeoutException { ByteArrayOutputStream commandStream = new ByteArrayOutputStream(); when(namedPipeHelper.openNamedPipeOutputStream(contains("command"), any(Duration.class))).thenReturn(commandStream); - NativeController nativeController = new NativeController(TestEnvironment.newEnvironment(settings), namedPipeHelper); + NativeController nativeController = new NativeController(NODE_NAME, TestEnvironment.newEnvironment(settings), namedPipeHelper); 
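// A self-contained reduction of the pattern these tests exercise: the local node name is
// captured once at construction and threaded into every failure message, so a log line
// immediately says which node could not start the process. All names below are
// illustrative stand-ins, not the real x-pack classes.
class NodeNamedControllerSketch {
    private final String localNodeName;
    private boolean logStreamEnded;

    NodeNamedControllerSketch(String localNodeName) {
        this.localNodeName = localNodeName;
    }

    void markStopped() {
        logStreamEnded = true; // simulates the C++ controller's log stream ending
    }

    void startProcess(String command) {
        if (logStreamEnded) {
            throw new IllegalStateException("Cannot start process [" + command
                + "]: native controller process has stopped on node [" + localNodeName + "]");
        }
        // the real controller writes the start command to a named pipe here
    }

    public static void main(String[] args) {
        NodeNamedControllerSketch controller = new NodeNamedControllerSketch("native-controller-tests-node");
        controller.markStopped();
        try {
            controller.startProcess("my process");
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // the message now names the failing node
        }
    }
}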
nativeController.tailLogsInThread(); Map nativeCodeInfo = nativeController.getNativeCodeInfo(); @@ -83,7 +85,7 @@ public void testControllerDeath() throws Exception { ByteArrayOutputStream commandStream = new ByteArrayOutputStream(); when(namedPipeHelper.openNamedPipeOutputStream(contains("command"), any(Duration.class))).thenReturn(commandStream); - NativeController nativeController = new NativeController(TestEnvironment.newEnvironment(settings), namedPipeHelper); + NativeController nativeController = new NativeController(NODE_NAME, TestEnvironment.newEnvironment(settings), namedPipeHelper); nativeController.tailLogsInThread(); // As soon as the log stream ends startProcess should think the native controller has died @@ -91,7 +93,8 @@ public void testControllerDeath() throws Exception { ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> nativeController.startProcess(Collections.singletonList("my process"))); - assertEquals("Cannot start process [my process]: native controller process has stopped", e.getMessage()); + assertEquals("Cannot start process [my process]: native controller process has stopped on node " + + "[native-controller-tests-node]", e.getMessage()); }); } } From 464f7699c516047021021061c748856ae7666535 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 09:35:07 +0200 Subject: [PATCH 179/321] Use comparator for Reconfigurator (#42283) Simplifies the voting configuration reconfiguration logic by switching to an explicit Comparator for the priorities. Does not make changes to the behavior of the component. --- .../cluster/coordination/Reconfigurator.java | 132 +++++++++--------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java index b189b7ec2cc2d..7a3a54d73b2fe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java @@ -27,15 +27,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; -import java.util.Collection; -import java.util.Collections; import java.util.Set; -import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Computes the optimal configuration of voting nodes in the cluster. @@ -102,76 +97,36 @@ public VotingConfiguration reconfigure(Set liveNodes, Set logger.trace("{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentMaster={}", this, currentConfig, liveNodes, retiredNodeIds, currentMaster); - /* - * There are three true/false properties of each node in play: live/non-live, retired/non-retired and in-config/not-in-config. 
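The explicit preference order that replaces this removed case analysis is easy to verify in isolation. Below is a runnable sketch using Comparator combinators; Candidate is a stand-in for the VotingConfigNode class introduced further down, and the ordering matches its compareTo (live nodes first, then members of the current config, then the current master, with the node id as a stable tiebreaker):

import java.util.Comparator;
import java.util.TreeSet;

public class VotingPreferenceDemo {
    static final class Candidate {
        final String id;
        final boolean live;
        final boolean currentMaster;
        final boolean inCurrentConfig;

        Candidate(String id, boolean live, boolean currentMaster, boolean inCurrentConfig) {
            this.id = id;
            this.live = live;
            this.currentMaster = currentMaster;
            this.inCurrentConfig = inCurrentConfig;
        }
    }

    static final Comparator<Candidate> PREFERENCE =
        Comparator.comparing((Candidate c) -> c.live, Comparator.reverseOrder())
            .thenComparing((Candidate c) -> c.inCurrentConfig, Comparator.reverseOrder())
            .thenComparing((Candidate c) -> c.currentMaster, Comparator.reverseOrder())
            .thenComparing((Candidate c) -> c.id);

    public static void main(String[] args) {
        TreeSet<Candidate> ordered = new TreeSet<>(PREFERENCE);
        ordered.add(new Candidate("nodeA", true, false, false));
        ordered.add(new Candidate("nodeB", true, false, true));
        ordered.add(new Candidate("nodeC", false, false, true));
        ordered.add(new Candidate("nodeD", true, true, true));
        // prints nodeD, nodeB, nodeA, nodeC: the order the new reconfigure() consumes
        ordered.forEach(c -> System.out.println(c.id));
    }
}

Keeping the candidates in a TreeSet built on this comparator, as the rewritten reconfigure() does, means the target configuration is simply the first targetSize entries of an always-sorted set.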
- * Firstly we divide the nodes into disjoint sets based on these properties: - * - * - nonRetiredMaster - * - nonRetiredNotMasterInConfigNotLiveIds - * - nonRetiredInConfigLiveIds - * - nonRetiredLiveNotInConfigIds - * - * The other 5 possibilities are not relevant: - * - retired, in-config, live -- retired nodes should be removed from the config - * - retired, in-config, non-live -- retired nodes should be removed from the config - * - retired, not-in-config, live -- cannot add a retired node back to the config - * - retired, not-in-config, non-live -- cannot add a retired node back to the config - * - non-retired, non-live, not-in-config -- no evidence this node exists at all - */ - final Set liveNodeIds = liveNodes.stream() .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet()); - final Set liveInConfigIds = new TreeSet<>(currentConfig.getNodeIds()); - liveInConfigIds.retainAll(liveNodeIds); - - final SortedSet inConfigNotLiveIds = Sets.unmodifiableSortedDifference(currentConfig.getNodeIds(), liveInConfigIds); - final SortedSet nonRetiredInConfigNotLiveIds = new TreeSet<>(inConfigNotLiveIds); - nonRetiredInConfigNotLiveIds.removeAll(retiredNodeIds); - - final Set nonRetiredInConfigLiveIds = new TreeSet<>(liveInConfigIds); - nonRetiredInConfigLiveIds.removeAll(retiredNodeIds); - - final Set nonRetiredInConfigLiveMasterIds; - final Set nonRetiredInConfigLiveNotMasterIds; - if (nonRetiredInConfigLiveIds.contains(currentMaster.getId())) { - nonRetiredInConfigLiveNotMasterIds = new TreeSet<>(nonRetiredInConfigLiveIds); - nonRetiredInConfigLiveNotMasterIds.remove(currentMaster.getId()); - nonRetiredInConfigLiveMasterIds = Collections.singleton(currentMaster.getId()); - } else { - nonRetiredInConfigLiveNotMasterIds = nonRetiredInConfigLiveIds; - nonRetiredInConfigLiveMasterIds = Collections.emptySet(); - } - - final SortedSet nonRetiredLiveNotInConfigIds = Sets.sortedDifference(liveNodeIds, currentConfig.getNodeIds()); - nonRetiredLiveNotInConfigIds.removeAll(retiredNodeIds); + final Set currentConfigNodeIds = currentConfig.getNodeIds(); + + final Set orderedCandidateNodes = new TreeSet<>(); + liveNodes.stream() + .filter(DiscoveryNode::isMasterNode) + .filter(n -> retiredNodeIds.contains(n.getId()) == false) + .forEach(n -> orderedCandidateNodes.add(new VotingConfigNode(n.getId(), true, + n.getId().equals(currentMaster.getId()), currentConfigNodeIds.contains(n.getId())))); + currentConfigNodeIds.stream() + .filter(nid -> liveNodeIds.contains(nid) == false) + .filter(nid -> retiredNodeIds.contains(nid) == false) + .forEach(nid -> orderedCandidateNodes.add(new VotingConfigNode(nid, false, false, true))); /* * Now we work out how many nodes should be in the configuration: */ - final int targetSize; - - final int nonRetiredLiveNodeCount = nonRetiredInConfigLiveIds.size() + nonRetiredLiveNotInConfigIds.size(); - final int nonRetiredConfigSize = nonRetiredInConfigLiveIds.size() + nonRetiredInConfigNotLiveIds.size(); - if (autoShrinkVotingConfiguration) { - if (nonRetiredLiveNodeCount >= 3) { - targetSize = roundDownToOdd(nonRetiredLiveNodeCount); - } else { - // only have one or two available nodes; may not shrink below 3 nodes automatically, but if - // the config (excluding retired nodes) is already smaller than 3 then it's ok. - targetSize = nonRetiredConfigSize < 3 ? 
1 : 3; - } - } else { - targetSize = Math.max(roundDownToOdd(nonRetiredLiveNodeCount), nonRetiredConfigSize); - } + final int nonRetiredConfigSize = Math.toIntExact(orderedCandidateNodes.stream().filter(n -> n.inCurrentConfig).count()); + final int minimumConfigEnforcedSize = autoShrinkVotingConfiguration ? (nonRetiredConfigSize < 3 ? 1 : 3) : nonRetiredConfigSize; + final int nonRetiredLiveNodeCount = Math.toIntExact(orderedCandidateNodes.stream().filter(n -> n.live).count()); + final int targetSize = Math.max(roundDownToOdd(nonRetiredLiveNodeCount), minimumConfigEnforcedSize); - /* - * The new configuration is formed by taking this many nodes in the following preference order: - */ final VotingConfiguration newConfig = new VotingConfiguration( - // live master first, then other live nodes, preferring the current config, and if we need more then use non-live nodes - Stream.of(nonRetiredInConfigLiveMasterIds, nonRetiredInConfigLiveNotMasterIds, nonRetiredLiveNotInConfigIds, - nonRetiredInConfigNotLiveIds).flatMap(Collection::stream).limit(targetSize).collect(Collectors.toSet())); + orderedCandidateNodes.stream() + .limit(targetSize) + .map(n -> n.id) + .collect(Collectors.toSet())); + // new configuration should have a quorum if (newConfig.hasQuorum(liveNodeIds)) { return newConfig; } else { @@ -179,4 +134,49 @@ public VotingConfiguration reconfigure(Set liveNodes, Set return currentConfig; } } + + static class VotingConfigNode implements Comparable { + final String id; + final boolean live; + final boolean currentMaster; + final boolean inCurrentConfig; + + VotingConfigNode(String id, boolean live, boolean currentMaster, boolean inCurrentConfig) { + this.id = id; + this.live = live; + this.currentMaster = currentMaster; + this.inCurrentConfig = inCurrentConfig; + } + + @Override + public int compareTo(VotingConfigNode other) { + // prefer nodes that are live + final int liveComp = Boolean.compare(other.live, live); + if (liveComp != 0) { + return liveComp; + } + // prefer nodes that are in current config for stability + final int inCurrentConfigComp = Boolean.compare(other.inCurrentConfig, inCurrentConfig); + if (inCurrentConfigComp != 0) { + return inCurrentConfigComp; + } + // prefer current master + final int currentMasterComp = Boolean.compare(other.currentMaster, currentMaster); + if (currentMasterComp != 0) { + return currentMasterComp; + } + // tiebreak by node id to have stable ordering + return id.compareTo(other.id); + } + + @Override + public String toString() { + return "VotingConfigNode{" + + "id='" + id + '\'' + + ", live=" + live + + ", currentMaster=" + currentMaster + + ", inCurrentConfig=" + inCurrentConfig + + '}'; + } + } } From 2ddd39aaa9d4613a92334fb5a21fedd7a64fefb7 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 09:47:23 +0200 Subject: [PATCH 180/321] Introduce ShardState Enum + Slight Cleanup SnapshotsInProgress (#41940) * Added separate enum for the state of each shard, it was really confusing that we used the same enum for the state of the snapshot overall and the state of each individual shard * relates https://github.com/elastic/elasticsearch/pull/40943#issuecomment-488664150 * Shortened some obvious spots in equals method and saved a few lines via `computeIfAbsent` to make up for adding 50 new lines to this class --- .../TransportSnapshotsStatusAction.java | 1 - .../cluster/SnapshotsInProgress.java | 98 ++++++++++++------- .../snapshots/SnapshotShardsService.java | 12 ++- .../snapshots/SnapshotsService.java | 21 ++-- 
.../cluster/SnapshotsInProgressTests.java | 11 ++- .../SharedClusterSnapshotRestoreIT.java | 7 +- ...SnapshotsInProgressSerializationTests.java | 3 +- 7 files changed, 95 insertions(+), 58 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index c2f0d3dd0c074..8430d1868c88d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -174,7 +174,6 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li break; case INIT: case WAITING: - case STARTED: stage = SnapshotIndexShardStage.STARTED; break; case SUCCESS: diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 5190adf7ba2d9..ae9506706e36a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -42,6 +42,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; /** * Meta data about snapshots that are currently executing @@ -53,12 +54,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - - SnapshotsInProgress that = (SnapshotsInProgress) o; - - if (!entries.equals(that.entries)) return false; - - return true; + return entries.equals(((SnapshotsInProgress) o).entries); } @Override @@ -208,18 +204,11 @@ public String toString() { return snapshot.toString(); } - // package private for testing - ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { + private ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { Map> waitingIndicesMap = new HashMap<>(); for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == State.WAITING) { - final String indexName = entry.key.getIndexName(); - List waitingShards = waitingIndicesMap.get(indexName); - if (waitingShards == null) { - waitingShards = new ArrayList<>(); - waitingIndicesMap.put(indexName, waitingShards); - } - waitingShards.add(entry.key); + if (entry.value.state() == ShardState.WAITING) { + waitingIndicesMap.computeIfAbsent(entry.key.getIndexName(), k -> new ArrayList<>()).add(entry.key); } } if (waitingIndicesMap.isEmpty()) { @@ -241,28 +230,27 @@ ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { for (ObjectCursor status : shards) { - if (status.value.state().completed() == false) { + if (status.value.state().completed == false) { return false; } } return true; } - public static class ShardSnapshotStatus { - private final State state; + private final ShardState state; private final String nodeId; private final String reason; public ShardSnapshotStatus(String nodeId) { - this(nodeId, State.INIT); + this(nodeId, ShardState.INIT); } - public ShardSnapshotStatus(String nodeId, State state) { + public ShardSnapshotStatus(String nodeId, ShardState state) { this(nodeId, state, null); } - public ShardSnapshotStatus(String nodeId, State state, String reason) { + public ShardSnapshotStatus(String nodeId, ShardState state, String reason) { 
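            // nodeId is the node this shard snapshot is assigned to; it is serialized with
            // writeOptionalString, so it may be null while the shard is unassigned. reason
            // carries the failure description for states such as FAILED and ABORTED (see the
            // call sites further down that pass "node shutdown" or "aborted by snapshot deletion")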
this.nodeId = nodeId; this.state = state; this.reason = reason; @@ -272,11 +260,11 @@ public ShardSnapshotStatus(String nodeId, State state, String reason) { public ShardSnapshotStatus(StreamInput in) throws IOException { nodeId = in.readOptionalString(); - state = State.fromValue(in.readByte()); + state = ShardState.fromValue(in.readByte()); reason = in.readOptionalString(); } - public State state() { + public ShardState state() { return state; } @@ -298,14 +286,9 @@ public void writeTo(StreamOutput out) throws IOException { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - ShardSnapshotStatus status = (ShardSnapshotStatus) o; + return Objects.equals(nodeId, status.nodeId) && Objects.equals(reason, status.reason) && state == status.state; - if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false; - if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false; - if (state != status.state) return false; - - return true; } @Override @@ -331,11 +314,11 @@ public enum State { MISSING((byte) 5, true, true), WAITING((byte) 6, false, false); - private byte value; + private final byte value; - private boolean completed; + private final boolean completed; - private boolean failed; + private final boolean failed; State(byte value, boolean completed, boolean failed) { this.value = value; @@ -379,7 +362,6 @@ public static State fromValue(byte value) { private final List entries; - public SnapshotsInProgress(List entries) { this.entries = entries; } @@ -534,4 +516,52 @@ public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params p builder.endArray(); builder.endObject(); } + + public enum ShardState { + INIT((byte) 0, false, false), + SUCCESS((byte) 2, true, false), + FAILED((byte) 3, true, true), + ABORTED((byte) 4, false, true), + MISSING((byte) 5, true, true), + WAITING((byte) 6, false, false); + + private final byte value; + + private final boolean completed; + + private final boolean failed; + + ShardState(byte value, boolean completed, boolean failed) { + this.value = value; + this.completed = completed; + this.failed = failed; + } + + public boolean completed() { + return completed; + } + + public boolean failed() { + return failed; + } + + public static ShardState fromValue(byte value) { + switch (value) { + case 0: + return INIT; + case 2: + return SUCCESS; + case 3: + return FAILED; + case 4: + return ABORTED; + case 5: + return MISSING; + case 6: + return WAITING; + default: + throw new IllegalArgumentException("No shard snapshot state for value [" + value + "]"); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index f052a1c7abeb8..a0c5ea9392c67 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -248,7 +249,8 @@ private void 
startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { // Add all new shards to start processing on final ShardId shardId = shard.key; final ShardSnapshotStatus shardSnapshotStatus = shard.value; - if (localNodeId.equals(shardSnapshotStatus.nodeId()) && shardSnapshotStatus.state() == State.INIT + if (localNodeId.equals(shardSnapshotStatus.nodeId()) + && shardSnapshotStatus.state() == ShardState.INIT && snapshotShards.containsKey(shardId) == false) { logger.trace("[{}] - Adding shard to the queue", shardId); if (startedShards == null) { @@ -286,7 +288,7 @@ private void startNewSnapshots(SnapshotsInProgress snapshotsInProgress) { } else { // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED - if (shard.value.state() == State.ABORTED) { + if (shard.value.state() == ShardState.ABORTED) { notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason()); } } @@ -480,12 +482,14 @@ public String toString() { /** Notify the master node that the given shard has been successfully snapshotted **/ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS)); + sendSnapshotShardUpdate(snapshot, shardId, + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.SUCCESS)); } /** Notify the master node that the given shard failed to be snapshotted **/ private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String failure) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure)); + sendSnapshotShardUpdate(snapshot, shardId, + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, failure)); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 1559bae8259b0..e606bff0cb9e4 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -776,7 +777,7 @@ public ClusterState execute(ClusterState currentState) { logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.key, shardStatus.nodeId()); shards.put(shardEntry.key, - new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); + new ShardSnapshotStatus(shardStatus.nodeId(), ShardState.FAILED, "node shutdown")); } } } @@ -872,7 +873,7 @@ private static ImmutableOpenMap processWaitingShar for (ObjectObjectCursor shardEntry : snapshotShards) { ShardSnapshotStatus shardStatus = shardEntry.value; ShardId shardId = shardEntry.key; - if (shardStatus.state() == State.WAITING) { + if (shardStatus.state() == ShardState.WAITING) { IndexRoutingTable 
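                    // re-examine this WAITING shard against the current routing table: the
                    // lookup below decides whether it can proceed or, if the shard went
                    // unassigned or its index routing disappeared, must be failed (see the
                    // "shard is unassigned" branch further down)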
indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); @@ -893,7 +894,7 @@ private static ImmutableOpenMap processWaitingShar // Shard that we were waiting for went into unassigned state or disappeared - giving up snapshotChanged = true; logger.warn("failing snapshot of shard [{}] on unassigned shard [{}]", shardId, shardStatus.nodeId()); - shards.put(shardId, new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "shard is unassigned")); + shards.put(shardId, new ShardSnapshotStatus(shardStatus.nodeId(), ShardState.FAILED, "shard is unassigned")); } else { shards.put(shardId, shardStatus); } @@ -943,7 +944,7 @@ private static Tuple, Set> indicesWithMissingShards( Set missing = new HashSet<>(); Set closed = new HashSet<>(); for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == State.MISSING) { + if (entry.value.state() == ShardState.MISSING) { if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { closed.add(entry.key.getIndex().getName()); @@ -1195,7 +1196,7 @@ public ClusterState execute(ClusterState currentState) { for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) { ShardSnapshotStatus status = shardEntry.value; if (status.state().completed() == false) { - status = new ShardSnapshotStatus(status.nodeId(), State.ABORTED, "aborted by snapshot deletion"); + status = new ShardSnapshotStatus(status.nodeId(), ShardState.ABORTED, "aborted by snapshot deletion"); } shardsBuilder.put(shardEntry.key, status); } @@ -1385,7 +1386,7 @@ private static ImmutableOpenMap shards = ImmutableOpenMap.builder(); // test more than one waiting shard in an index - shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); - shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); + shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); + shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); shards.put(new ShardId(idx1Name, idx1UUID, 2), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); // test exactly one waiting shard in an index - shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); + shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); shards.put(new ShardId(idx2Name, idx2UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); // test no waiting shards in an index shards.put(new ShardId(idx3Name, idx3UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); @@ -72,7 +73,7 @@ public void testWaitingIndices() { assertFalse(waitingIndices.containsKey(idx3Name)); } - private State randomNonWaitingState() { - return randomFrom(Arrays.stream(State.values()).filter(s -> s != State.WAITING).collect(Collectors.toSet())); + private ShardState randomNonWaitingState() { + return randomFrom(Arrays.stream(ShardState.values()).filter(s -> s != ShardState.WAITING).collect(Collectors.toSet())); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java 
b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3a78b4786fc5c..0aa9fe1a9e2a6 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -53,6 +53,7 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -2701,9 +2702,9 @@ public void testDeleteOrphanSnapshot() throws Exception { public ClusterState execute(ClusterState currentState) { // Simulate orphan snapshot ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); return ClusterState.builder(currentState) .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(List.of(new Entry( new Snapshot(repositoryName, createSnapshotResponse.getSnapshotInfo().snapshotId()), diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 3f23c8f0a2ded..6c8ddfb56c1cf 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -65,7 +66,7 @@ private Entry randomSnapshot() { for (int j = 0; j < shardsCount; j++) { ShardId shardId = new ShardId(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)), randomIntBetween(0, 10)); String nodeId = randomAlphaOfLength(10); - State shardState = randomFrom(State.values()); + ShardState shardState = randomFrom(ShardState.values()); builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, shardState.failed() ? randomAlphaOfLength(10) : null)); } From 4a9788eeb31c01c8f6039884eae9bec40707cc5b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 10:02:14 +0200 Subject: [PATCH 181/321] Fix testTracerLog Network Tests (#42286) * Fix testTracerLog Network Tests * Start appender before using it like we do for e.g. 
the Netty leak detection appender to avoid interference from actions on the network threads that might still be dangling from previous tests in the same suite * Closes #41890 --- .../transport/AbstractSimpleTransportTestCase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 783fe77b9bf9c..441044328be96 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1047,10 +1047,9 @@ public String executor() { .build()); MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); try { appender.start(); - + Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); final String requestSent = ".*\\[internal:test].*sent to.*\\{TS_B}.*"; final MockLogAppender.LoggingExpectation requestSentExpectation = new MockLogAppender.PatternSeenEventExpectation( From a28d405a30ce92cf8fd7abc86915a0ff4cef2166 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 11:08:11 +0300 Subject: [PATCH 182/321] Revert "mute failing filerealm hash caching tests (#42304)" This reverts commit 8907dc9598667a1fa29be0ba22c7030ebee1101b. --- .../elasticsearch/xpack/security/authc/file/FileRealmTests.java | 1 - .../authc/support/CachingUsernamePasswordRealmTests.java | 1 - 2 files changed, 2 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index 67ab33bac7380..d2ab879d4d4ff 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -89,7 +89,6 @@ private RealmConfig getRealmConfig(Settings settings) { return new RealmConfig(REALM_IDENTIFIER, settings, TestEnvironment.newEnvironment(settings), threadContext); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 49f0d45966639..91a0fc9d94e2e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -63,7 +63,6 @@ public void stop() { } } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42267") public void testCacheSettings() { String cachingHashAlgo = randomFrom(Hasher.getAvailableAlgoCacheHash()); int maxUsers = randomIntBetween(10, 100); From b31482e65992f90d76b13286043f6986899c4403 Mon Sep 17 00:00:00 2001 From: Yannick Welsch 
Date: Wed, 22 May 2019 10:25:11 +0200 Subject: [PATCH 183/321] Remove usage of max_local_storage_nodes in test infrastructure (#41652) Moves the test infrastructure away from using node.max_local_storage_nodes, allowing us in a follow-up PR to deprecate this setting in 7.x and to remove it in 8.0. This also changes the behavior of InternalTestCluster so that starting up nodes will not automatically reuse data folders of previously stopped nodes. If this behavior is desired, it needs to be explicitly done by passing the data path from the stopped node to the new node that is started. --- .../gradle/test/ClusterFormationTasks.groovy | 1 - .../ingest/common/IngestRestartIT.java | 10 +- .../ClusterAllocationExplainIT.java | 9 +- .../cluster/ClusterInfoServiceIT.java | 10 -- .../cluster/MinimumMasterNodesIT.java | 15 ++- .../cluster/SpecificMasterNodesIT.java | 6 +- .../UnsafeBootstrapAndDetachCommandIT.java | 126 ++++++++++++------ .../cluster/routing/AllocationIdIT.java | 6 +- .../cluster/routing/DelayedAllocationIT.java | 6 +- .../cluster/routing/PrimaryAllocationIT.java | 15 ++- .../elasticsearch/env/NodeEnvironmentIT.java | 10 +- .../env/NodeRepurposeCommandIT.java | 36 +++-- .../env/NodeRepurposeCommandTests.java | 8 +- .../gateway/RecoveryFromGatewayIT.java | 9 +- .../RemoveCorruptedShardDataCommandIT.java | 75 +++++------ .../memory/breaker/CircuitBreakerNoopIT.java | 2 - .../indices/recovery/IndexRecoveryIT.java | 10 +- .../store/IndicesStoreIntegrationIT.java | 11 +- .../elasticsearch/test/ESIntegTestCase.java | 2 - .../test/InternalTestCluster.java | 48 ++++--- .../test/test/InternalTestClusterTests.java | 51 +++---- .../elasticsearch/xpack/CcrIntegTestCase.java | 2 - .../integration/BasicDistributedJobsIT.java | 7 +- .../integration/MlDistributedFailureIT.java | 4 +- 24 files changed, 270 insertions(+), 209 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e7ffb88f13702..c0bf2a5dccee5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -382,7 +382,6 @@ class ClusterFormationTasks { // Don't wait for state, just start up quickly. 
This will also allow new and old nodes in the BWC case to become the master 'discovery.initial_state_timeout' : '0s' ] - esConfig['node.max_local_storage_nodes'] = node.config.numNodes esConfig['http.port'] = node.config.httpPort if (node.nodeVersion.onOrAfter('6.7.0')) { esConfig['transport.port'] = node.config.transportPort diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 8c3976d2b175c..6c79c68df1df1 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -91,9 +91,15 @@ public void testScriptDisabled() throws Exception { checkPipelineExists.accept(pipelineIdWithoutScript); - internalCluster().stopCurrentMasterNode(); - internalCluster().startNode(Settings.builder().put("script.allowed_types", "none")); + internalCluster().restartNode(internalCluster().getMasterName(), new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put("script.allowed_types", "none").build(); + } + + }); + checkPipelineExists.accept(pipelineIdWithoutScript); checkPipelineExists.accept(pipelineIdWithScript); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 941ad3c658aba..25765ab1ee667 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -277,6 +277,8 @@ public void testUnassignedReplicaWithPriorCopy() throws Exception { nodes.remove(primaryNodeName); logger.info("--> shutting down all nodes except the one that holds the primary"); + Settings node0DataPathSettings = internalCluster().dataPathSettings(nodes.get(0)); + Settings node1DataPathSettings = internalCluster().dataPathSettings(nodes.get(1)); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0))); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1))); ensureStableCluster(1); @@ -286,8 +288,8 @@ public void testUnassignedReplicaWithPriorCopy() throws Exception { Settings.builder().put("index.routing.allocation.include._name", primaryNodeName)).get(); logger.info("--> restarting the stopped nodes"); - internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).build()); - internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).build()); + internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).put(node0DataPathSettings).build()); + internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).put(node1DataPathSettings).build()); ensureStableCluster(3); boolean includeYesDecisions = randomBoolean(); @@ -1017,6 +1019,7 @@ public void testCannotAllocateStaleReplicaExplanation() throws Exception { // start replica node first, so it's path will be used first when we start a node after // stopping all of them at end of test. 
final String replicaNode = internalCluster().startNode(); + Settings replicaDataPathSettings = internalCluster().dataPathSettings(replicaNode); final String primaryNode = internalCluster().startNode(); prepareIndex(IndexMetaData.State.OPEN, 1, 1, @@ -1057,7 +1060,7 @@ public void testCannotAllocateStaleReplicaExplanation() throws Exception { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); logger.info("--> restart the node with the stale replica"); - String restartedNode = internalCluster().startDataOnlyNode(); + String restartedNode = internalCluster().startDataOnlyNode(replicaDataPathSettings); ensureClusterSizeConsistency(); // wait for the master to finish processing join. // wait until the system has fetched shard data and we know there is no valid shard copy diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 99f89548524f5..aa897f10bb895 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; @@ -105,15 +104,6 @@ public void blockActions(String... actions) { } } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - // manual collection or upon cluster forming. - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) - .build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class); diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 4bd5d2e675b75..cb1443bdf3765 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -43,10 +44,12 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -124,6 +127,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{otherNode})).get(); logger.info("--> stop master node, no master block should appear"); + Settings masterDataPathSettings = 
internalCluster().dataPathSettings(masterNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode)); awaitBusy(() -> { @@ -137,7 +141,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { assertThat(state.nodes().getMasterNode(), equalTo(null)); logger.info("--> starting the previous master node again..."); - node2Name = internalCluster().startNode(settings); + node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build()); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet(); @@ -169,6 +173,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{masterNode})).get(); logger.info("--> stop non-master node, no master block should appear"); + Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNode)); assertBusy(() -> { @@ -177,7 +182,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { }); logger.info("--> starting the previous master node again..."); - internalCluster().startNode(settings); + internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build()); ensureGreen(); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) @@ -251,6 +256,10 @@ public void testThreeNodesNoMasterBlock() throws Exception { assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100); } + List nonMasterNodes = new ArrayList<>(Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), + Collections.singleton(internalCluster().getMasterName()))); + Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonMasterNodes.get(0)); + Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonMasterNodes.get(1)); internalCluster().stopRandomNonMasterNode(); internalCluster().stopRandomNonMasterNode(); @@ -262,7 +271,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { }); logger.info("--> start back the 2 nodes "); - internalCluster().startNodes(2, settings); + internalCluster().startNodes(nonMasterDataPathSettings1, nonMasterDataPathSettings2); internalCluster().validateClusterFormed(); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 5ce996a2e77fd..f80a5befa83d9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -65,6 +65,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> stop master node"); + Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); internalCluster().stopCurrentMasterNode(); try { @@ -75,9 +76,10 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { // all is well, no master elected } - logger.info("--> start master node"); + logger.info("--> start 
previous master node again"); final String nextMasterEligibleNodeName = internalCluster() - .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true) + .put(masterDataPathSettings)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState() diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 88cd5bdee1278..3bbf8378483dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -79,26 +79,26 @@ private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environmen return terminal; } - private MockTerminal unsafeBootstrap(Environment environment, int nodeOrdinal, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, nodeOrdinal, abort); + private MockTerminal unsafeBootstrap(Environment environment, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); return terminal; } - private MockTerminal detachCluster(Environment environment, int nodeOrdinal, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, nodeOrdinal, abort); + private MockTerminal detachCluster(Environment environment, boolean abort) throws Exception { + final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, 0, abort); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG)); return terminal; } private MockTerminal unsafeBootstrap(Environment environment) throws Exception { - return unsafeBootstrap(environment, 0, false); + return unsafeBootstrap(environment, false); } private MockTerminal detachCluster(Environment environment) throws Exception { - return detachCluster(environment, 0, false); + return detachCluster(environment, false); } private void expectThrows(ThrowingRunnable runnable, String message) { @@ -151,7 +151,7 @@ public void testBootstrapNoNodeMetaData() throws IOException { } public void testBootstrapNotBootstrappedCluster() throws Exception { - internalCluster().startNode( + String node = internalCluster().startNode( Settings.builder() .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup .build()); @@ -161,14 +161,17 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); + Settings dataPathSettings = internalCluster().dataPathSettings(node); + 
@@ -151,7 +151,7 @@ public void testBootstrapNoNodeMetaData() throws IOException {
     }

     public void testBootstrapNotBootstrappedCluster() throws Exception {
-        internalCluster().startNode(
+        String node = internalCluster().startNode(
             Settings.builder()
                 .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup
                 .build());
@@ -161,14 +161,17 @@ public void testBootstrapNotBootstrappedCluster() throws Exception {
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });

+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
+
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG);
     }

     public void testDetachNotBootstrappedCluster() throws Exception {
-        internalCluster().startNode(
+        String node = internalCluster().startNode(
             Settings.builder()
                 .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup
                 .build());
@@ -178,19 +181,24 @@ public void testDetachNotBootstrappedCluster() throws Exception {
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });

+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
+
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG);
     }

     public void testBootstrapNoManifestFile() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();
-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG);
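Each TestEnvironment.newEnvironment(...) call above now layers the stopped node's path settings over the cluster defaults. Under the per-node-folder scheme the defaults alone no longer point at any node's directory, so a CLI command built from them would have nothing to inspect. A sketch of the combination, using names from the diff:

    // aim the node-tool Environment at the stopped node's own data folder
    Environment environment = TestEnvironment.newEnvironment(
        Settings.builder()
            .put(internalCluster().getDefaultSettings()) // cluster-wide defaults
            .put(dataPathSettings)                       // this node's path.data
            .build());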
@@ -198,11 +206,13 @@ public void testBootstrapNoManifestFile() throws IOException {

     public void testDetachNoManifestFile() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();
-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG);
@@ -210,12 +220,14 @@ public void testDetachNoManifestFile() throws IOException {

     public void testBootstrapNoMetaData() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG);
@@ -223,12 +235,14 @@ public void testBootstrapNoMetaData() throws IOException {

     public void testDetachNoMetaData() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG);
@@ -236,22 +250,26 @@ public void testDetachNoMetaData() throws IOException {

     public void testBootstrapAbortedByUser() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        expectThrows(() -> unsafeBootstrap(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
+        expectThrows(() -> unsafeBootstrap(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
     }

     public void testDetachAbortedByUser() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        expectThrows(() -> detachCluster(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
+        expectThrows(() -> detachCluster(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
     }

     public void test3MasterNodes2Failed() throws Exception {
@@ -278,6 +296,11 @@ public void test3MasterNodes2Failed() throws Exception {
         createIndex("test");
         ensureGreen("test");

+        Settings master1DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(0));
+        Settings master2DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(1));
+        Settings master3DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(2));
+        Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
+
         logger.info("--> stop 2nd and 3d master eligible node");
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1)));
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2)));
@@ -290,8 +313,9 @@
         });

         logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held");
-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
+        Environment environmentMaster1 = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build());
+        expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);

         logger.info("--> stop 1st master-eligible node and data-only node");
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
@@ -299,20 +323,22 @@
         internalCluster().stopRandomDataNode();

         logger.info("--> unsafely-bootstrap 1st master-eligible node");
-        MockTerminal terminal = unsafeBootstrap(environment);
+        MockTerminal terminal = unsafeBootstrap(environmentMaster1);
         MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths());
         assertThat(terminal.getOutput(), containsString(
             String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT,
                 metaData.coordinationMetaData().term(), metaData.version())));

         logger.info("--> start 1st master-eligible node");
-        internalCluster().startMasterOnlyNode();
+        internalCluster().startMasterOnlyNode(master1DataPathSettings);

         logger.info("--> detach-cluster on data-only node");
-        detachCluster(environment, 1, false);
+        Environment environmentData = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build());
+        detachCluster(environmentData, false);

         logger.info("--> start data-only node");
-        String dataNode2 = internalCluster().startDataOnlyNode();
+        String dataNode2 = internalCluster().startDataOnlyNode(dataNodeDataPathSettings);

         logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state");
         assertBusy(() -> {
@@ -326,11 +352,16 @@ public void test3MasterNodes2Failed() throws Exception {
         ensureGreen("test");

         logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes");
-        detachCluster(environment, 2, false);
-        detachCluster(environment, 3, false);
+        Environment environmentMaster2 = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build());
+        detachCluster(environmentMaster2, false);
+        Environment environmentMaster3 = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(master3DataPathSettings).build());
+        detachCluster(environmentMaster3, false);

         logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster");
-        internalCluster().startMasterOnlyNodes(2);
+        internalCluster().startMasterOnlyNode(master2DataPathSettings);
+        internalCluster().startMasterOnlyNode(master3DataPathSettings);
         ensureStableCluster(4);
     }

@@ -353,9 +384,11 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti
         assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));

         logger.info("--> stop data-only node and detach it from the old cluster");
+        Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode));
-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        detachCluster(environment, 1, false);
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build());
+        detachCluster(environment, false);

         logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form");
         internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback(){
@@ -366,7 +399,7 @@ public boolean clearData(String nodeName) {
         });

         logger.info("--> start data-only only node and ensure 2 nodes stable cluster");
-        internalCluster().startDataOnlyNode();
+        internalCluster().startDataOnlyNode(dataNodeDataPathSettings);
         ensureStableCluster(2);

         logger.info("--> verify that the dangling index exists and has green status");
@@ -381,15 +414,18 @@ public boolean clearData(String nodeName) {

     public void testNoInitialBootstrapAfterDetach() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startMasterOnlyNode();
+        String masterNode = internalCluster().startMasterOnlyNode();
+        Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
         internalCluster().stopCurrentMasterNode();

-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build());
         detachCluster(environment);

         String node = internalCluster().startMasterOnlyNode(Settings.builder()
             // give the cluster 2 seconds to elect the master (it should not)
             .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s")
+            .put(masterNodeDataPathSettings)
             .build());

         ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true)
@@ -401,7 +437,8 @@ public void testNoInitialBootstrapAfterDetach() throws Exception {

     public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startMasterOnlyNode();
+        String masterNode = internalCluster().startMasterOnlyNode();
+        Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
         ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
             Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb"));
         internalCluster().client().admin().cluster().updateSettings(req).get();
@@ -412,11 +449,12 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData(

         internalCluster().stopCurrentMasterNode();

-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build());
         detachCluster(environment);
         unsafeBootstrap(environment);

-        internalCluster().startMasterOnlyNode();
+        internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
         ensureGreen();

         state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -430,8 +468,10 @@ private static class SimulatedDeleteFailureException extends RuntimeException {
     public void testCleanupOldMetaDataFails() throws Exception {
         // establish some metadata.
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         internalCluster().stopRandomDataNode();

         // find data paths
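test3MasterNodes2Failed above needs one Environment per master-eligible node, because each node now holds its own node lock in its own directory; the old code could pass an ordinal into a single shared path instead. A hypothetical helper (not in the diff) capturing what the repeated construction does:

    // hypothetical convenience wrapper, assuming the diff's helpers
    private Environment environmentFor(String nodeName) {
        return TestEnvironment.newEnvironment(Settings.builder()
            .put(internalCluster().getDefaultSettings())
            .put(internalCluster().dataPathSettings(nodeName))
            .build());
    }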
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java
index c6d7f925a2c85..d92624f539c9e 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdIT.java
@@ -105,6 +105,8 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale
         internalCluster().assertSameDocIdsOnShards();
         // initial set up is done

+        Settings node1DataPathSettings = internalCluster().dataPathSettings(node1);
+        Settings node2DataPathSettings = internalCluster().dataPathSettings(node2);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));

         // index more docs to node2 that marks node1 as stale
@@ -117,7 +119,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale
         putFakeCorruptionMarker(indexSettings, shardId, indexPath);

         // thanks to master node1 is out of sync
-        node1 = internalCluster().startNode();
+        node1 = internalCluster().startNode(node1DataPathSettings);

         // there is only _stale_ primary
         checkNoValidShardCopy(indexName, shardId);
@@ -157,7 +159,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale
         ensureYellow(indexName);

         // bring node2 back
-        node2 = internalCluster().startNode();
+        node2 = internalCluster().startNode(node2DataPathSettings);
         ensureGreen(indexName);

         assertThat(historyUUID(node1, indexName), not(equalTo(historyUUID)));
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
index c175624125e50..e11c2ddaa79da 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
@@ -67,11 +67,13 @@ public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception {
                 .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))).get();
         ensureGreen("test");
         indexRandomData();
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));
+        String nodeWithShard = findNodeWithShard();
+        Settings nodeWithShardDataPathSettings = internalCluster().dataPathSettings(nodeWithShard);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithShard));
         assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState()
             .getRoutingNodes().unassigned().size() > 0, equalTo(true)));
         assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1));
-        internalCluster().startNode(); // this will use the same data location as the stopped node
+        internalCluster().startNode(nodeWithShardDataPathSettings); // this will use the same data location as the stopped node
         ensureGreen("test");
     }
client().admin().cluster().prepareState().get().getState().metaData().index("test") @@ -390,7 +394,7 @@ public boolean clearData(String nodeName) { .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); - internalCluster().startDataOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNode(inSyncDataPathSettings); ensureGreen("test"); } @@ -402,6 +406,7 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); + final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); assertEquals(2, client().admin().cluster().prepareState().get().getState() @@ -424,7 +429,7 @@ public boolean clearData(String nodeName) { .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); - internalCluster().startDataOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNode(inSyncDataPathSettings); assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() .getRoutingTable().index("test").allPrimaryShardsUnassigned())); } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 37e260a01d069..74de578426f2c 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -39,7 +39,8 @@ public void testStartFailureOnDataForNonDataNode() throws Exception { final String indexName = "test-fail-on-data"; logger.info("--> starting one node"); - internalCluster().startNode(); + String node = internalCluster().startNode(); + Settings dataPathSettings = internalCluster().dataPathSettings(node); logger.info("--> creating index"); prepareCreate(indexName, Settings.builder() @@ -69,13 +70,12 @@ public Settings onNodeStopped(String nodeName) { + Node.NODE_MASTER_SETTING.getKey() + "=false, but has index metadata")); - // client() also starts the node + logger.info("--> start the node again with node.data=true and node.master=true"); + internalCluster().startNode(dataPathSettings); + logger.info("--> indexing a simple document"); client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); - logger.info("--> restarting the node with node.data=true and node.master=true"); - internalCluster().restartRandomDataNode(); - logger.info("--> restarting the node with node.data=false"); ex = expectThrows(IllegalStateException.class, "Node started with node.data=false while having existing shard data must fail", diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index c07d710f60508..a6229b16c3055 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -37,8 +37,8 @@ public void testRepurpose() throws Exception { final String indexName = "test-repurpose"; logger.info("--> starting two nodes"); - internalCluster().startMasterOnlyNode(); - internalCluster().startDataOnlyNode(); + final String masterNode = 
diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java
index 37e260a01d069..74de578426f2c 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java
@@ -39,7 +39,8 @@ public void testStartFailureOnDataForNonDataNode() throws Exception {
         final String indexName = "test-fail-on-data";

         logger.info("--> starting one node");
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);

         logger.info("--> creating index");
         prepareCreate(indexName, Settings.builder()
@@ -69,13 +70,12 @@ public Settings onNodeStopped(String nodeName) {
                 + Node.NODE_MASTER_SETTING.getKey()
                 + "=false, but has index metadata"));

-        // client() also starts the node
+        logger.info("--> start the node again with node.data=true and node.master=true");
+        internalCluster().startNode(dataPathSettings);
+
         logger.info("--> indexing a simple document");
         client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get();

-        logger.info("--> restarting the node with node.data=true and node.master=true");
-        internalCluster().restartRandomDataNode();
-
         logger.info("--> restarting the node with node.data=false");
         ex = expectThrows(IllegalStateException.class,
             "Node started with node.data=false while having existing shard data must fail",
diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java
index c07d710f60508..a6229b16c3055 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java
@@ -37,8 +37,8 @@ public void testRepurpose() throws Exception {
         final String indexName = "test-repurpose";

         logger.info("--> starting two nodes");
-        internalCluster().startMasterOnlyNode();
-        internalCluster().startDataOnlyNode();
+        final String masterNode = internalCluster().startMasterOnlyNode();
+        final String dataNode = internalCluster().startDataOnlyNode();

         logger.info("--> creating index");
         prepareCreate(indexName, Settings.builder()
@@ -54,31 +54,44 @@ public void testRepurpose() throws Exception {

         assertTrue(client().prepareGet(indexName, "type1", "1").get().isExists());

+        final Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
+        final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
+
         final Settings noMasterNoDataSettings = Settings.builder()
             .put(Node.NODE_DATA_SETTING.getKey(), false)
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .build();

+        final Settings noMasterNoDataSettingsForMasterNode = Settings.builder()
+            .put(noMasterNoDataSettings)
+            .put(masterNodeDataPathSettings)
+            .build();
+
+        final Settings noMasterNoDataSettingsForDataNode = Settings.builder()
+            .put(noMasterNoDataSettings)
+            .put(dataNodeDataPathSettings)
+            .build();
+
         internalCluster().stopRandomDataNode();

         // verify test setup
         logger.info("--> restarting node with node.data=false and node.master=false");
         IllegalStateException ex = expectThrows(IllegalStateException.class,
             "Node started with node.data=false and node.master=false while having existing index metadata must fail",
-            () -> internalCluster().startCoordinatingOnlyNode(Settings.EMPTY)
+            () -> internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings)
         );

         logger.info("--> Repurposing node 1");
-        executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 1, 1);
+        executeRepurposeCommand(noMasterNoDataSettingsForDataNode, indexUUID, 1);

         ElasticsearchException lockedException = expectThrows(ElasticsearchException.class,
-            () -> executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 1)
+            () -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 1)
         );

         assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG));

         logger.info("--> Starting node after repurpose");
-        internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
+        internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings);

         assertTrue(indexExists(indexName));
         expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "type1", "1").get());
@@ -88,12 +101,12 @@ public void testRepurpose() throws Exception {
         internalCluster().stopRandomNode(s -> true);
         internalCluster().stopRandomNode(s -> true);

-        executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 0);
+        executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 0);

         // by restarting as master and data node, we can check that the index definition was really deleted and also that the tool
         // does not mess things up so much that the nodes cannot boot as master or data node any longer.
-        internalCluster().startMasterOnlyNode();
-        internalCluster().startDataOnlyNode();
+        internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
+        internalCluster().startDataOnlyNode(dataNodeDataPathSettings);

         ensureGreen();

@@ -101,8 +114,7 @@ public void testRepurpose() throws Exception {
         assertFalse(indexExists(indexName));
     }

-    private void executeRepurposeCommandForOrdinal(Settings settings, String indexUUID, int ordinal,
-                                                   int expectedShardCount) throws Exception {
+    private void executeRepurposeCommand(Settings settings, String indexUUID, int expectedShardCount) throws Exception {
         boolean verbose = randomBoolean();
         Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build();
         int expectedIndexCount = TestEnvironment.newEnvironment(settingsWithPath).dataFiles().length;
@@ -111,6 +123,6 @@ private void executeRepurposeCommand(Settings settings, String indexUU
             not(contains(NodeRepurposeCommand.PRE_V7_MESSAGE)),
             NodeRepurposeCommandTests.conditionalNot(containsString(indexUUID), verbose == false));
         NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher,
-            verbose, ordinal);
+            verbose);
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java
index 436439d64db1f..8f713e57bf4da 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java
@@ -190,14 +190,10 @@ public void testCleanupShardData() throws Exception {
         }
     }

-    private static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose) throws Exception {
-        verifySuccess(settings, outputMatcher, verbose, 0);
-    }
-
-    static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose, int ordinal) throws Exception {
+    static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose) throws Exception {
         withTerminal(verbose, outputMatcher, terminal -> {
             terminal.addTextInput(randomFrom("y", "Y"));
-            executeRepurposeCommand(terminal, settings, ordinal);
+            executeRepurposeCommand(terminal, settings, 0);
             assertThat(terminal.getOutput(), containsString("Node successfully repurposed"));
         });
     }
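With node ordinals gone, the repurpose test selects its target by mixing that node's captured path settings into the command settings. Roughly, per the hunks above:

    // point the repurpose command at the stopped data node's own path.data
    Settings repurposeSettings = Settings.builder()
        .put(noMasterNoDataSettings)   // node.master=false, node.data=false
        .put(dataNodeDataPathSettings) // selects the node by its folder
        .build();
    executeRepurposeCommand(repurposeSettings, indexUUID, 1); // expects 1 shard cleaned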
"1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute() @@ -393,7 +396,9 @@ public void testLatestVersionLoaded() throws Exception { logger.info("--> starting the two nodes back"); - internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()); + internalCluster().startNodes( + Settings.builder().put(node1DataPathSettings).put("gateway.recover_after_nodes", 2).build(), + Settings.builder().put(node2DataPathSettings).put("gateway.recover_after_nodes", 2).build()); logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index 7ca2e0a64070b..b7881adf76285 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -140,7 +140,10 @@ public void testCorruptIndex() throws Exception { final MockTerminal terminal = new MockTerminal(); final OptionParser parser = command.getParser(); - final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); + final Settings nodePathSettings = internalCluster().dataPathSettings(node); + + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(nodePathSettings).build()); final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0"); // Try running it before the node is stopped (and shard is closed) @@ -305,6 +308,9 @@ public void testCorruptTranslogTruncation() throws Exception { logger.info("--> performed extra flushing on replica"); } + final Settings node1PathSettings = internalCluster().dataPathSettings(node1); + final Settings node2PathSettings = internalCluster().dataPathSettings(node2); + // shut down the replica node to be tested later internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2)); @@ -348,8 +354,8 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); - final Settings defaultSettings = internalCluster().getDefaultSettings(); - final Environment environment = TestEnvironment.newEnvironment(defaultSettings); + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build()); terminal.addTextInput("y"); OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); @@ -411,7 +417,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocsToKeep); logger.info("--> starting the replica node to test recovery"); - internalCluster().startNode(); + internalCluster().startNode(node2PathSettings); ensureGreen(indexName); for (String node : internalCluster().nodesInclude(indexName)) { SearchRequestBuilder q = client().prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery()); @@ -473,7 +479,10 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception { final ShardId shardId = new ShardId(resolveIndex(indexName), 0); final Set translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME); - // stop data nodes. 
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java
index 7ca2e0a64070b..b7881adf76285 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java
@@ -140,7 +140,10 @@ public void testCorruptIndex() throws Exception {
         final MockTerminal terminal = new MockTerminal();
         final OptionParser parser = command.getParser();

-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        final Settings nodePathSettings = internalCluster().dataPathSettings(node);
+
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(nodePathSettings).build());
         final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0");

         // Try running it before the node is stopped (and shard is closed)
@@ -305,6 +308,9 @@ public void testCorruptTranslogTruncation() throws Exception {
             logger.info("--> performed extra flushing on replica");
         }

+        final Settings node1PathSettings = internalCluster().dataPathSettings(node1);
+        final Settings node2PathSettings = internalCluster().dataPathSettings(node2);
+
         // shut down the replica node to be tested later
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2));

@@ -348,8 +354,8 @@ public Settings onNodeStopped(String nodeName) throws Exception {
             }
         });

-        final Settings defaultSettings = internalCluster().getDefaultSettings();
-        final Environment environment = TestEnvironment.newEnvironment(defaultSettings);
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build());

         terminal.addTextInput("y");
         OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
@@ -411,7 +417,7 @@ public Settings onNodeStopped(String nodeName) throws Exception {
         assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocsToKeep);
         logger.info("--> starting the replica node to test recovery");
-        internalCluster().startNode();
+        internalCluster().startNode(node2PathSettings);
         ensureGreen(indexName);

         for (String node : internalCluster().nodesInclude(indexName)) {
             SearchRequestBuilder q = client().prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery());
@@ -473,7 +479,10 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception {
         final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
         final Set<Path> translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME);

-        // stop data nodes. After the restart the 1st node will be primary and the 2nd node will be replica
+        final Settings node1PathSettings = internalCluster().dataPathSettings(node1);
+        final Settings node2PathSettings = internalCluster().dataPathSettings(node2);
+
+        // stop data nodes
         internalCluster().stopRandomDataNode();
         internalCluster().stopRandomDataNode();
@@ -481,53 +490,32 @@ public void testCorruptTranslogTruncationOfReplica() throws Exception {
         logger.info("--> corrupting translog");
         TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs);

-        // Restart the single node
+        // Start the node with the non-corrupted data path
         logger.info("--> starting node");
-        internalCluster().startNode();
+        internalCluster().startNode(node1PathSettings);

         ensureYellow();

         // Run a search and make sure it succeeds
         assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), totalDocs);

+        // check replica corruption
         final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
         final MockTerminal terminal = new MockTerminal();
         final OptionParser parser = command.getParser();
-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-
-        internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
-            @Override
-            public Settings onNodeStopped(String nodeName) throws Exception {
-                logger.info("--> node {} stopped", nodeName);
-                for (Path translogDir : translogDirs) {
-                    final Path idxLocation = translogDir.getParent().resolve(ShardPath.INDEX_FOLDER_NAME);
-                    assertBusy(() -> {
-                        logger.info("--> checking that lock has been released for {}", idxLocation);
-                        try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
-                             Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
-                            // Great, do nothing, we just wanted to obtain the lock
-                        } catch (LockObtainFailedException lofe) {
-                            logger.info("--> failed acquiring lock for {}", idxLocation);
-                            fail("still waiting for lock release at [" + idxLocation + "]");
-                        } catch (IOException ioe) {
-                            fail("Got an IOException: " + ioe);
-                        }
-                    });
-
-                    terminal.addTextInput("y");
-                    OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
-                    logger.info("--> running command for [{}]", translogDir.toAbsolutePath());
-                    command.execute(terminal, options, environment);
-                    logger.info("--> output:\n{}", terminal.getOutput());
-                }
-
-                return super.onNodeStopped(nodeName);
-            }
-        });
+        for (Path translogDir : translogDirs) {
+            final Environment environment = TestEnvironment.newEnvironment(
+                Settings.builder().put(internalCluster().getDefaultSettings()).put(node2PathSettings).build());
+            terminal.addTextInput("y");
+            OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
+            logger.info("--> running command for [{}]", translogDir.toAbsolutePath());
+            command.execute(terminal, options, environment);
+            logger.info("--> output:\n{}", terminal.getOutput());
+        }

         logger.info("--> starting the replica node to test recovery");
-        internalCluster().startNode();
+        internalCluster().startNode(node2PathSettings);
         ensureGreen(indexName);

         for (String node : internalCluster().nodesInclude(indexName)) {
             assertHitCount(client().prepareSearch(indexName)
@@ -574,15 +562,18 @@ public void testResolvePath() throws Exception {
         final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
         final OptionParser parser = command.getParser();

-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-
         final Map<String, Path> indexPathByNodeName = new HashMap<>();
+        final Map<String, Environment> environmentByNodeName = new HashMap<>();
         for (String nodeName : nodeNames) {
             final String nodeId = nodeNameToNodeId.get(nodeName);
             final Set<Path> indexDirs = getDirs(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME);
             assertThat(indexDirs, hasSize(1));
             indexPathByNodeName.put(nodeName, indexDirs.iterator().next());

+            final Environment environment = TestEnvironment.newEnvironment(
+                Settings.builder().put(internalCluster().getDefaultSettings()).put(internalCluster().dataPathSettings(nodeName)).build());
+            environmentByNodeName.put(nodeName, environment);
+
             internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName));
             logger.info(" -- stopped {}", nodeName);
         }
@@ -590,7 +581,7 @@ public void testResolvePath() throws Exception {
         for (String nodeName : nodeNames) {
             final Path indexPath = indexPathByNodeName.get(nodeName);
             final OptionSet options = parser.parse("--dir", indexPath.toAbsolutePath().toString());
-            command.findAndProcessShardPath(options, environment,
+            command.findAndProcessShardPath(options, environmentByNodeName.get(nodeName),
                 shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath)));
         }
     }
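Because each stopped node needs its own Environment, testResolvePath builds them up front, keyed by node name, before any node goes down. The bookkeeping in compressed form (Map<String, Environment> is the diff's own structure):

    final Map<String, Environment> environmentByNodeName = new HashMap<>();
    for (String nodeName : nodeNames) {
        // build the Environment while the node is still registered...
        environmentByNodeName.put(nodeName, TestEnvironment.newEnvironment(
            Settings.builder()
                .put(internalCluster().getDefaultSettings())
                .put(internalCluster().dataPathSettings(nodeName))
                .build()));
        // ...then stop it; the command later runs against the right folder
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName));
    }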
diff --git a/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java
index 627eb74007bae..02eab6dc0aad2 100644
--- a/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -41,7 +40,6 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
-            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
             .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop")
             // This is set low, because if the "noop" is not a noop, it will break
             .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index 3130cebad7097..0ea8eb8e9b447 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -853,8 +853,12 @@ public void testHistoryRetention() throws Exception {
             flush(indexName);
         }

-        internalCluster().stopRandomNode(s -> true);
-        internalCluster().stopRandomNode(s -> true);
+        String firstNodeToStop = randomFrom(internalCluster().getNodeNames());
+        Settings firstNodeToStopDataPathSettings = internalCluster().dataPathSettings(firstNodeToStop);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(firstNodeToStop));
+        String secondNodeToStop = randomFrom(internalCluster().getNodeNames());
+        Settings secondNodeToStopDataPathSettings = internalCluster().dataPathSettings(secondNodeToStop);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(secondNodeToStop));

         final long desyncNanoTime = System.nanoTime();
         while (System.nanoTime() <= desyncNanoTime) {
@@ -871,7 +875,7 @@ public void testHistoryRetention() throws Exception {
         assertAcked(client().admin().indices().prepareUpdateSettings(indexName)
             .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)));

-        internalCluster().startNode();
+        internalCluster().startNode(randomFrom(firstNodeToStopDataPathSettings, secondNodeToStopDataPathSettings));
         ensureGreen(indexName);

         final RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(indexName)).get();
diff --git a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
index 23c1a837d4346..f388e49b31fb2 100644
--- a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
@@ -74,7 +74,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         // simplify this and only use a single data path
-        return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), "")
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())
             // by default this value is 1 sec in tests (30 sec in practice) but we adding disruption here
             // which is between 1 and 2 sec can cause each of the shard deletion requests to timeout.
             // to prevent this we are setting the timeout here to something highish ie. the default in practice
@@ -335,8 +335,11 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
             .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));

         logger.debug("--> shutting down two random nodes");
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3));
+        List<String> nodesToShutDown = randomSubsetOf(2, node1, node2, node3);
+        Settings node1DataPathSettings = internalCluster().dataPathSettings(nodesToShutDown.get(0));
+        Settings node2DataPathSettings = internalCluster().dataPathSettings(nodesToShutDown.get(1));
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesToShutDown.get(0)));
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesToShutDown.get(1)));

         logger.debug("--> verifying index is red");
         ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
@@ -369,7 +372,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {

         logger.debug("--> starting the two old nodes back");

-        internalCluster().startDataOnlyNodes(2);
+        internalCluster().startNodes(node1DataPathSettings, node2DataPathSettings);

         assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());

diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 134a91ac296c1..d45c83444b2fc 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -107,7 +107,6 @@
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexModule;
@@ -1777,7 +1776,6 @@ private int getNumClientNodes() {
      */
     protected Settings nodeSettings(int nodeOrdinal) {
         Settings.Builder builder = Settings.builder()
-            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE)
             // Default the watermarks to absurdly low to prevent the tests
             // from failing on nodes without enough disk space
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 7ff928c4413d2..2e88a018e5a0d 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -138,6 +138,7 @@
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 import java.util.stream.Stream;

 import static java.util.Collections.emptyList;
@@ -238,6 +239,8 @@ public final class InternalTestCluster extends TestCluster {

     private final boolean forbidPrivateIndexSettings;

+    private final int numDataPaths;
+
     /**
      * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
      */
@@ -353,20 +356,8 @@ public InternalTestCluster(
             numSharedDedicatedMasterNodes, numSharedDataNodes, numSharedCoordOnlyNodes,
             autoManageMinMasterNodes ? "auto-managed" : "manual");
         this.nodeConfigurationSource = nodeConfigurationSource;
+        numDataPaths = random.nextInt(5) == 0 ? 2 + random.nextInt(3) : 1;
         Builder builder = Settings.builder();
-        if (random.nextInt(5) == 0) { // sometimes set this
-            // randomize (multi/single) data path, special case for 0, don't set it at all...
-            final int numOfDataPaths = random.nextInt(5);
-            if (numOfDataPaths > 0) {
-                StringBuilder dataPath = new StringBuilder();
-                for (int i = 0; i < numOfDataPaths; i++) {
-                    dataPath.append(baseDir.resolve("d" + i).toAbsolutePath()).append(',');
-                }
-                builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString());
-            }
-        }
-        builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE);
-        builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom"));
         builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
         builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));
         builder.put(TransportSettings.PORT.getKey(), 0);
@@ -625,11 +616,24 @@ private Settings getNodeSettings(final int nodeId, final long seed, final Settin

         final String name = buildNodeName(nodeId, settings);

-        final Settings.Builder updatedSettings = Settings.builder()
-            .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home
-            .put(settings)
-            .put("node.name", name)
-            .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed);
+        final Settings.Builder updatedSettings = Settings.builder();
+
+        updatedSettings.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
+
+        if (numDataPaths > 1) {
+            updatedSettings.putList(Environment.PATH_DATA_SETTING.getKey(), IntStream.range(0, numDataPaths).mapToObj(i ->
+                baseDir.resolve(name).resolve("d" + i).toString()).collect(Collectors.toList()));
+        } else {
+            updatedSettings.put(Environment.PATH_DATA_SETTING.getKey(), baseDir.resolve(name));
+        }
+
+        updatedSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve(name + "-shared"));
+
+        // allow overriding the above
+        updatedSettings.put(settings);
+        // force certain settings
+        updatedSettings.put("node.name", name);
+        updatedSettings.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed);

         if (autoManageMinMasterNodes) {
             assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting",
@@ -965,7 +969,7 @@ private void recreateNode(final Settings newSettings, final Runnable onTransport
         if (closed.get() == false) {
             throw new IllegalStateException("node " + name + " should be closed before recreating it");
         }
-        // use a new seed to make sure we have new node id
+        // use a new seed to make sure we generate a fresh new node id if the data folder has been wiped
         final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1;
         Settings finalSettings = Settings.builder()
             .put(originalNodeSettings)
@@ -1531,6 +1535,12 @@ private static <T> T getInstanceFromNode(Class<T> clazz, Node node) {
         return node.injector().getInstance(clazz);
     }

+    public Settings dataPathSettings(String node) {
+        return nodes.values().stream().filter(nc -> nc.name.equals(node)).findFirst().get().node().settings()
+            .filter(key ->
+                key.equals(Environment.PATH_DATA_SETTING.getKey()) || key.equals(Environment.PATH_SHARED_DATA_SETTING.getKey()));
+    }
+
     @Override
     public int size() {
         return nodes.size();
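The heart of the change sits in getNodeSettings: path.data is now derived deterministically from the node name, so the same name always maps to the same folder and dataPathSettings(name) can hand that mapping to tests. A reduced sketch of the derivation, outside the real class and with the setting keys written as literals:

    // simplified model of the per-node data path assignment above
    static Settings nodeDataPaths(Path baseDir, String name, int numDataPaths) {
        Settings.Builder builder = Settings.builder();
        if (numDataPaths > 1) {
            // multiple data paths: baseDir/<name>/d0, baseDir/<name>/d1, ...
            builder.putList("path.data", IntStream.range(0, numDataPaths)
                .mapToObj(i -> baseDir.resolve(name).resolve("d" + i).toString())
                .collect(Collectors.toList()));
        } else {
            builder.put("path.data", baseDir.resolve(name).toString());
        }
        builder.put("path.shared_data", baseDir.resolve(name + "-shared").toString());
        return builder.build();
    }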
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index 8461e6ade09ac..a690e4bbbdd21 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -20,7 +20,6 @@

 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.network.NetworkModule;
@@ -28,6 +27,7 @@
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider;
+import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.plugins.Plugin;
@@ -35,7 +35,6 @@
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.NodeConfigurationSource;
-import org.elasticsearch.transport.TransportSettings;

 import java.io.IOException;
 import java.nio.file.Files;
@@ -91,20 +90,21 @@ public void testInitializiationIsConsistent() {
         InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
             nodePrefix, Collections.emptyList(), Function.identity());
-        // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way
-        assertClusters(cluster0, cluster1, false);
+        assertClusters(cluster0, cluster1, true);
     }

     /**
-     * a set of settings that are expected to have different values betweem clusters, even they have been initialized with the same
+     * a set of settings that are expected to have different values between clusters, even they have been initialized with the same
      * base settings.
      */
     static final Set<String> clusterUniqueSettings = new HashSet<>();

     static {
-        clusterUniqueSettings.add(ClusterName.CLUSTER_NAME_SETTING.getKey());
-        clusterUniqueSettings.add(TransportSettings.PORT.getKey());
-        clusterUniqueSettings.add("http.port");
+        clusterUniqueSettings.add(Environment.PATH_HOME_SETTING.getKey());
+        clusterUniqueSettings.add(Environment.PATH_DATA_SETTING.getKey());
+        clusterUniqueSettings.add(Environment.PATH_REPO_SETTING.getKey());
+        clusterUniqueSettings.add(Environment.PATH_SHARED_DATA_SETTING.getKey());
+        clusterUniqueSettings.add(Environment.PATH_LOGS_SETTING.getKey());
     }
@@ -112,9 +112,6 @@ public static void assertClusters(InternalTestClus
         Settings defaultSettings0 = cluster0.getDefaultSettings();
         Settings defaultSettings1 = cluster1.getDefaultSettings();
         assertSettings(defaultSettings0, defaultSettings1, checkClusterUniqueSettings);
         assertThat(cluster0.numDataNodes(), equalTo(cluster1.numDataNodes()));
-        if (checkClusterUniqueSettings) {
-            assertThat(cluster0.getClusterName(), equalTo(cluster1.getClusterName()));
-        }
     }

     public static void assertSettings(Settings left, Settings right, boolean checkClusterUniqueSettings) {
@@ -127,7 +124,7 @@ public static void assertSettings(Settings left, Settings right, boolean checkCl
                 continue;
             }
             assertTrue("key [" + key + "] is missing in " + keys1, keys1.contains(key));
-            assertEquals(right.get(key), left.get(key));
+            assertEquals(key, right.get(key), left.get(key));
         }
     }
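Two clusters built from the same seed now agree on cluster name and ports, while the path.* settings are the per-instance values, so the comparison skips exactly those keys. A hedged sketch of what assertSettings(..., true) effectively checks, using clusterUniqueSettings from above:

    // compare defaults of two equally-seeded clusters, ignoring per-instance paths
    for (String key : cluster0.getDefaultSettings().keySet()) {
        if (clusterUniqueSettings.contains(key) == false) {
            assertEquals(key, cluster0.getDefaultSettings().get(key),
                cluster1.getDefaultSettings().get(key));
        }
    }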
@@ -151,16 +148,11 @@ public void testBeforeTest() throws Exception {
             bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
         }
         final int numClientNodes = randomIntBetween(0, 2);
-        final String clusterName1 = "shared1";
-        final String clusterName2 = "shared2";
         String transportClient = getTestTransportType();
         NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
                 final Settings.Builder settings = Settings.builder()
-                    .put(
-                        NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
-                        2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
                     .put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                     .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
@@ -185,14 +177,13 @@ public Settings transportClientSettings() {

         String nodePrefix = "foobar";

-        Path baseDir = createTempDir();
-        InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
+        InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, createTempDir(), masterNodes,
+            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, "clustername", nodeConfigurationSource, numClientNodes,
             nodePrefix, mockPlugins(), Function.identity());
         cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);

-        InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
-            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
+        InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, createTempDir(), masterNodes,
+            autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, "clustername", nodeConfigurationSource, numClientNodes,
             nodePrefix, mockPlugins(), Function.identity());
         cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);

@@ -234,9 +225,6 @@ public void testDataFolderAssignmentAndCleaning() throws IOException, Interrupte
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
                 return Settings.builder()
-                    .put(
-                        NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
-                        2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                     .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                     .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
@@ -269,12 +257,9 @@ public Settings transportClientSettings() {
         String poorNode = randomValueOtherThanMany(n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()),
             () -> randomFrom(cluster.getNodeNames()));
         Path dataPath = getNodePaths(cluster, poorNode)[0];
+        final Settings poorNodeDataPathSettings = cluster.dataPathSettings(poorNode);
         final Path testMarker = dataPath.resolve("testMarker");
         Files.createDirectories(testMarker);
-        int expectedMasterCount = originalMasterCount;
-        if (cluster.getInstance(ClusterService.class, poorNode).localNode().isMasterNode()) {
-            expectedMasterCount--;
-        }
         cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode));
         assertFileExists(testMarker); // stopping a node half way shouldn't clean data
@@ -285,15 +270,15 @@ public Settings transportClientSettings() {
         Files.createDirectories(stableTestMarker);

         final String newNode1 = cluster.startNode();
-        expectedMasterCount++;
-        assertThat(getNodePaths(cluster, newNode1)[0], equalTo(dataPath));
+        assertThat(getNodePaths(cluster, newNode1)[0], not(dataPath));
         assertFileExists(testMarker); // starting a node should re-use data folders and not clean it

         final String newNode2 = cluster.startNode();
-        expectedMasterCount++;
         final Path newDataPath = getNodePaths(cluster, newNode2)[0];
         final Path newTestMarker = newDataPath.resolve("newTestMarker");
         assertThat(newDataPath, not(dataPath));
         Files.createDirectories(newTestMarker);
+        final String newNode3 = cluster.startNode(poorNodeDataPathSettings);
+        assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath));
         cluster.beforeTest(random(), 0.0);
         assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made
         assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned
@@ -333,7 +318,6 @@ public void testDifferentRolesMaintainPathOnRestart() throws Exception {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
                 return Settings.builder()
-                    .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes)
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                     .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0)
                     .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
@@ -417,7 +401,6 @@ public void testTwoNodeCluster() throws Exception {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
                 return Settings.builder()
-                    .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                     .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
                     .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
index dea3da2a3ba1b..eba3532f063bf 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java
@@ -46,7 +46,6 @@
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.core.internal.io.IOUtils;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.DocIdSeqNoAndSource;
@@ -213,7 +212,6 @@ public void afterTest() throws Exception {

     private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) {
         Settings.Builder builder = Settings.builder();
-        builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE);
         // Default the watermarks to absurdly low to prevent the tests
         // from failing on nodes without enough disk space
         builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b");
org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; @@ -213,7 +212,6 @@ public void afterTest() throws Exception { private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) { Settings.Builder builder = Settings.builder(); - builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE); // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b"); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index 7779f4e13d0ea..97d0824d2ac0f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; @@ -331,7 +332,7 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); // start non ml node that will hold the state and results indices logger.info("Start non ml node:"); - internalCluster().startNode(Settings.builder() + String nonMLNode = internalCluster().startNode(Settings.builder() .put("node.data", true) .put("node.attr.ml-indices", "state-and-results") .put(MachineLearning.ML_ENABLED.getKey(), false)); @@ -389,7 +390,8 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { assertEquals(0, tasks.taskMap().size()); }); logger.info("Stop non ml node"); - internalCluster().stopRandomNode(settings -> settings.getAsBoolean(MachineLearning.ML_ENABLED.getKey(), false) == false); + Settings nonMLNodeDataPathSettings = internalCluster().dataPathSettings(nonMLNode); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nonMLNode)); ensureStableCluster(1); Exception e = expectThrows(ElasticsearchStatusException.class, @@ -406,6 +408,7 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { logger.info("Start data node"); String nonMlNode = internalCluster().startNode(Settings.builder() + .put(nonMLNodeDataPathSettings) .put("node.data", true) .put(MachineLearning.ML_ENABLED.getKey(), false)); ensureStableCluster(2, mlNode); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index b2f6a662c1167..480f85798800b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -90,7 +90,8 @@ public void testLoseDedicatedMasterNode() throws 
Exception { ensureStableClusterOnAllNodes(2); run("lose-dedicated-master-node-job", () -> { logger.info("Stopping dedicated master node"); - internalCluster().stopRandomNode(settings -> settings.getAsBoolean("node.master", false)); + Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); + internalCluster().stopCurrentMasterNode(); assertBusy(() -> { ClusterState state = client(mlAndDataNode).admin().cluster().prepareState() .setLocal(true).get().getState(); @@ -98,6 +99,7 @@ public void testLoseDedicatedMasterNode() throws Exception { }); logger.info("Restarting dedicated master node"); internalCluster().startNode(Settings.builder() + .put(masterDataPathSettings) .put("node.master", true) .put("node.data", false) .put("node.ml", false) From d589cad84251e1f7bc7dbd77e407887dbb967682 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 22 May 2019 09:26:41 +0100 Subject: [PATCH 184/321] Rework discovery-ec2 docs (#41630) This commit reworks and clarifies the docs for the `discovery-ec2` plugin: - folds the tiny "Getting started with AWS" into the page on configuration - spells out the name of each setting in full instead of noting the `discovery.ec2` prefix at the top of the page. - replaces each `(Secure)` marker with a sentence describing what that means in situ - notes some missing defaults - clarifies the behaviour of `discovery.ec2.groups` (dependent on `.any_group`) - clarifies what `discovery.ec2.host_type` is for - adds `discovery.ec2.tag.TAGNAME` as a (meta-)setting rather than describing it in a separate section - notes that the tags mentioned in `discovery.ec2.tag.TAGNAME` cannot contain colons (see #38406) - clarifies the EC2-specific interface names and what they're for - reorders and rewords the recommendations for storage - expands on why you should not span a cluster across regions - adds a suggestion on protecting instances against termination during scale-in - reformat to 80 columns where possible Fixes #38406 --- docs/plugins/discovery-ec2.asciidoc | 391 ++++++++++++++++++---------- 1 file changed, 259 insertions(+), 132 deletions(-) diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 9ec6b7bab054f..fd51bd881daf0 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -1,34 +1,52 @@ [[discovery-ec2]] === EC2 Discovery Plugin -The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] -to identify the addresses of seed hosts. +The EC2 discovery plugin provides a list of seed addresses to the +{ref}/modules-discovery-hosts-providers.html[discovery process] by querying the +https://github.com/aws/aws-sdk-java[AWS API] for a list of EC2 instances +matching certain criteria determined by the <>. -*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.* +*If you are looking for a hosted solution of {es} on AWS, please visit +http://www.elastic.co/cloud.* :plugin_name: discovery-ec2 include::install_remove.asciidoc[] [[discovery-ec2-usage]] -==== Getting started with AWS +==== Using the EC2 discovery plugin -The plugin adds a seed hosts provider named `ec2`. This seed hosts provider -finds other Elasticsearch instances in EC2 by querying the AWS metadata -service. Authentication is done using -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM -Role] credentials by default. 
To enable the plugin, configure {es} to use the -`ec2` seed hosts provider: +The `discovery-ec2` plugin allows {es} to find the master-eligible nodes in a +cluster running on AWS EC2 by querying the +https://github.com/aws/aws-sdk-java[AWS API] for the addresses of the EC2 +instances running these nodes. + +It is normally a good idea to restrict the discovery process just to the +master-eligible nodes in the cluster. This plugin allows you to identify these +nodes by certain criteria including their tags, their membership of security +groups, and their placement within availability zones. The discovery process +will work correctly even if it finds master-ineligible nodes, but master +elections will be more efficient if this can be avoided. + +The interaction with the AWS API can be authenticated using the +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[instance +role], or else custom credentials can be supplied. + +===== Enabling EC2 discovery + +To enable EC2 discovery, configure {es} to use the `ec2` seed hosts provider: [source,yaml] ---- discovery.seed_providers: ec2 ---- -==== Settings +===== Configuring EC2 discovery -EC2 discovery supports a number of settings. Some settings are sensitive and -must be stored in the {ref}/secure-settings.html[elasticsearch keystore]. For -example, to use explicit AWS access keys: +EC2 discovery supports a number of settings. Some settings are sensitive and +must be stored in the {ref}/secure-settings.html[{es} keystore]. For example, +to authenticate using a particular access key and secret key, add these keys to +the keystore by running the following commands: [source,sh] ---- @@ -36,132 +54,163 @@ bin/elasticsearch-keystore add discovery.ec2.access_key bin/elasticsearch-keystore add discovery.ec2.secret_key ---- -The following are the available discovery settings. All should be prefixed with `discovery.ec2.`. -Those that must be stored in the keystore are marked as `Secure`. +The available settings for the EC2 discovery plugin are as follows. + +`discovery.ec2.access_key`:: -`access_key`:: + An EC2 access key. If set, you must also set `discovery.ec2.secret_key`. + If unset, `discovery-ec2` will instead use the instance role. This setting + is sensitive and must be stored in the {ref}/secure-settings.html[{es} + keystore]. - An ec2 access key. The `secret_key` setting must also be specified. (Secure) +`discovery.ec2.secret_key`:: -`secret_key`:: + An EC2 secret key. If set, you must also set `discovery.ec2.access_key`. + This setting is sensitive and must be stored in the + {ref}/secure-settings.html[{es} keystore]. - An ec2 secret key. The `access_key` setting must also be specified. (Secure) +`discovery.ec2.session_token`:: -`session_token`:: - An ec2 session token. The `access_key` and `secret_key` settings must also - be specified. (Secure) + An EC2 session token. If set, you must also set `discovery.ec2.access_key` + and `discovery.ec2.secret_key`. This setting is sensitive and must be + stored in the {ref}/secure-settings.html[{es} keystore]. -`endpoint`:: +`discovery.ec2.endpoint`:: - The ec2 service endpoint to connect to. See - http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region. This - defaults to `ec2.us-east-1.amazonaws.com`. + The EC2 service endpoint to which to connect. See + http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region to find + the appropriate endpoint for the region. 
This setting defaults to + `ec2.us-east-1.amazonaws.com` which is appropriate for clusters running in + the `us-east-1` region. -`protocol`:: +`discovery.ec2.protocol`:: - The protocol to use to connect to ec2. Valid values are either `http` - or `https`. Defaults to `https`. + The protocol to use to connect to the EC2 service endpoint, which may be + either `http` or `https`. Defaults to `https`. -`proxy.host`:: +`discovery.ec2.proxy.host`:: - The host name of a proxy to connect to ec2 through. + The address or host name of an HTTP proxy through which to connect to EC2. + If not set, no proxy is used. -`proxy.port`:: +`discovery.ec2.proxy.port`:: - The port of a proxy to connect to ec2 through. + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the port to use to connect to the proxy. Defaults to + `80`. -`proxy.username`:: +`discovery.ec2.proxy.username`:: - The username to connect to the `proxy.host` with. (Secure) + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the username to use to connect to the proxy. When + not set, no username is used. This setting is sensitive and must be stored + in the {ref}/secure-settings.html[{es} keystore]. -`proxy.password`:: +`discovery.ec2.proxy.password`:: - The password to connect to the `proxy.host` with. (Secure) + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the password to use to connect to the proxy. When + not set, no password is used. This setting is sensitive and must be stored + in the {ref}/secure-settings.html[{es} keystore]. -`read_timeout`:: +`discovery.ec2.read_timeout`:: - The socket timeout for connecting to ec2. The value should specify the unit. For example, - a value of `5s` specifies a 5 second timeout. The default value is 50 seconds. + The socket timeout for connections to EC2, + {ref}/common-options.html#time-units[including the units]. For example, a + value of `60s` specifies a 60-second timeout. Defaults to 50 seconds. -`groups`:: +`discovery.ec2.groups`:: - Either a comma separated list or array based list of (security) groups. - Only instances with the provided security groups will be used in the - cluster discovery. (NOTE: You could provide either group NAME or group - ID.) + A list of the names or IDs of the security groups to use for discovery. The + `discovery.ec2.any_group` setting determines the behaviour of this setting. + Defaults to an empty list, meaning that security group membership is + ignored by EC2 discovery. -`host_type`:: +`discovery.ec2.any_group`:: + + Defaults to `true`, meaning that instances belonging to _any_ of the + security groups specified in `discovery.ec2.groups` will be used for + discovery. If set to `false`, only instances that belong to _all_ of the + security groups specified in `discovery.ec2.groups` will be used for + discovery. + +`discovery.ec2.host_type`:: + -- -The type of host type to use to communicate with other instances. Can be -one of `private_ip`, `public_ip`, `private_dns`, `public_dns` or `tag:TAGNAME` where -`TAGNAME` refers to a name of a tag configured for all EC2 instances. Instances which don't -have this tag set will be ignored by the discovery process. -For example if you defined a tag `my-elasticsearch-host` in ec2 and set it to `myhostname1.mydomain.com`, then -setting `host_type: tag:my-elasticsearch-host` will tell Discovery Ec2 plugin to read the host name from the -`my-elasticsearch-host` tag. 
In this case, it will be resolved to `myhostname1.mydomain.com`. -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Read more about EC2 Tags]. +Each EC2 instance has a number of different addresses that might be suitable +for discovery. This setting allows you to select which of these addresses is +used by the discovery process. It can be set to one of `private_ip`, +`public_ip`, `private_dns`, `public_dns` or `tag:TAGNAME` where `TAGNAME` +refers to a name of a tag. This setting defaults to `private_ip`. -Defaults to `private_ip`. --- +If you set `discovery.ec2.host_type` to a value of the form `tag:TAGNAME` then +the value of the tag `TAGNAME` attached to each instance will be used as that +instance's address for discovery. Instances which do not have this tag set will +be ignored by the discovery process. -`availability_zones`:: +For example if you tag some EC2 instances with a tag named +`elasticsearch-host-name` and set `host_type: tag:elasticsearch-host-name` then +the `discovery-ec2` plugin will read each instance's host name from the value +of the `elasticsearch-host-name` tag. +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Read more +about EC2 Tags]. - Either a comma separated list or array based list of availability zones. - Only instances within the provided availability zones will be used in the - cluster discovery. +-- -`any_group`:: +`discovery.ec2.availability_zones`:: - If set to `false`, will require all security groups to be present for the - instance to be used for the discovery. Defaults to `true`. + A list of the names of the availability zones to use for discovery. The + name of an availability zone is the + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[region + code followed by a letter], such as `us-east-1a`. Only instances placed in + one of the given availability zones will be used for discovery. -`node_cache_time`:: +[[discovery-ec2-filtering]] +`discovery.ec2.tag.TAGNAME`:: - How long the list of hosts is cached to prevent further requests to the AWS API. - Defaults to `10s`. ++ +-- -*All* secure settings of this plugin are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, an aws sdk client with the latest settings -from the keystore will be used. +A list of the values of a tag called `TAGNAME` to use for discovery. If set, +only instances that are tagged with one of the given values will be used for +discovery. For instance, the following settings will only use nodes with a +`role` tag set to `master` and an `environment` tag set to either `dev` or +`staging`. -[IMPORTANT] -.Binding the network host -============================================== [source,yaml] ---- +discovery.ec2.tag.role: master +discovery.ec2.tag.environment: dev,staging +---- -It's important to define `network.host` as by default it's bound to `localhost`. +NOTE: The names of tags used for discovery may only contain ASCII letters, +numbers, hyphens and underscores. In particular you cannot use tags whose name +includes a colon. -You can use {ref}/modules-network.html[core network host settings] or -<>: +-- -============================================== +`discovery.ec2.node_cache_time`:: -[[discovery-ec2-network-host]] -===== EC2 Network Host + Sets the length of time for which the collection of discovered instances is + cached. {es} waits at least this long between requests for discovery + information from the EC2 API.
AWS may reject discovery requests if they are + made too often, and this would cause discovery to fail. Defaults to `10s`. -When the `discovery-ec2` plugin is installed, the following are also allowed -as valid network host settings: +All **secure** settings of this plugin are +{ref}/secure-settings.html#reloadable-secure-settings[reloadable], allowing you +to update the secure settings for this plugin without needing to restart each +node. -[cols="<,<",options="header",] -|================================================================== -|EC2 Host Value |Description -|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine. -|`_ec2:privateDns_` |The private host of the machine. -|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine. -|`_ec2:publicDns_` |The public host of the machine. -|`_ec2:privateIp_` |equivalent to `_ec2:privateIpv4_`. -|`_ec2:publicIp_` |equivalent to `_ec2:publicIpv4_`. -|`_ec2_` |equivalent to `_ec2:privateIpv4_`. -|================================================================== [[discovery-ec2-permissions]] -===== Recommended EC2 Permissions +===== Recommended EC2 permissions -EC2 discovery requires making a call to the EC2 service. You'll want to setup -an IAM policy to allow this. You can create a custom policy via the IAM -Management Console. It should look similar to this. +The `discovery-ec2` plugin works by making a `DescribeInstances` call to the AWS +EC2 API. You must configure your AWS account to allow this, which is normally +done using an IAM policy. You can create a custom policy via the IAM Management +Console. It should look similar to this. [source,js] ---- @@ -182,60 +231,138 @@ Management Console. It should look similar to this. ---- // NOTCONSOLE -[[discovery-ec2-filtering]] -===== Filtering by Tags - -The ec2 discovery plugin can also filter machines to include in the cluster -based on tags (and not just groups). The settings to use include the -`discovery.ec2.tag.` prefix. For example, if you defined a tag `stage` in EC2 -and set it to `dev`, setting `discovery.ec2.tag.stage` to `dev` will only -filter instances with a tag key set to `stage`, and a value of `dev`. Adding -multiple `discovery.ec2.tag` settings will require all of those tags to be set -for the instance to be included. - -One practical use for tag filtering is when an ec2 cluster contains many nodes -that are not master-eligible {es} nodes. In this case, tagging the ec2 -instances that _are_ running the master-eligible {es} nodes, and then filtering -by that tag, will help discovery to run more efficiently. - [[discovery-ec2-attributes]] -===== Automatic Node Attributes +===== Automatic node attributes -Though not dependent on actually using `ec2` as discovery (but still requires the `discovery-ec2` plugin installed), the -plugin can automatically add node attributes relating to ec2. In the future this may support other attributes, but this will -currently only add an `aws_availability_zone` node attribute, which is the availability zone of the current node. Attributes -can be used to isolate primary and replica shards across availability zones by using the +The `discovery-ec2` plugin can automatically set the `aws_availability_zone` +node attribute to the availability zone of each node. This node attribute +allows you to ensure that each shard has copies allocated redundantly across +multiple availability zones by using the {ref}/allocation-awareness.html[Allocation Awareness] feature. 
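For a concrete picture of how `cloud.node.auto_attributes` and allocation awareness work together, here is a minimal sketch in the `Settings.builder()` style used by the tests elsewhere in this patch series; both setting keys are taken from the surrounding documentation, while the class and variable names are illustrative only:

[source,java]
----
import org.elasticsearch.common.settings.Settings;

public class AwarenessSettingsSketch {
    public static void main(String[] args) {
        // discovery-ec2 publishes the node attribute aws_availability_zone when
        // cloud.node.auto_attributes is true; allocation awareness then spreads
        // shard copies across distinct values of that attribute.
        Settings nodeSettings = Settings.builder()
                .put("cloud.node.auto_attributes", true)
                .put("cluster.routing.allocation.awareness.attributes", "aws_availability_zone")
                .build();
        System.out.println(nodeSettings.keySet());
    }
}
----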
-In order to enable it, set `cloud.node.auto_attributes` to `true` in the settings. For example: +In order to enable the automatic definition of the `aws_availability_zone` +attribute, set `cloud.node.auto_attributes` to `true`. For example: [source,yaml] ---- cloud.node.auto_attributes: true - cluster.routing.allocation.awareness.attributes: aws_availability_zone ---- +The `aws_availability_zone` attribute can be automatically set like this when +using any discovery type. It is not necessary to set `discovery.seed_providers: +ec2`. However this feature does require that the `discovery-ec2` plugin is +installed. + +[[discovery-ec2-network-host]] +===== Binding to the correct address + +It is important to define `network.host` correctly when deploying a cluster on +EC2. By default each {es} node only binds to `localhost`, which will prevent it +from being discovered by nodes running on any other instances. + +You can use the {ref}/modules-network.html[core network host settings] to bind +each node to the desired address, or you can set `network.host` to one of the +following EC2-specific settings provided by the `discovery-ec2` plugin: + +[cols="<,<",options="header",] +|================================================================== +|EC2 Host Value |Description +|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine. +|`_ec2:privateDns_` |The private host of the machine. +|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine. +|`_ec2:publicDns_` |The public host of the machine. +|`_ec2:privateIp_` |Equivalent to `_ec2:privateIpv4_`. +|`_ec2:publicIp_` |Equivalent to `_ec2:publicIpv4_`. +|`_ec2_` |Equivalent to `_ec2:privateIpv4_`. +|================================================================== + +These values are acceptable when using any discovery type. They do not require +you to set `discovery.seed_providers: ec2`. However they do require that the +`discovery-ec2` plugin is installed. + [[cloud-aws-best-practices]] ==== Best Practices in AWS -Collection of best practices and other information around running Elasticsearch on AWS. +This section contains some other information about designing and managing an +{es} cluster on your own AWS infrastructure. If you would prefer to avoid these +operational details then you may be interested in a hosted {es} installation +available on AWS-based infrastructure from http://www.elastic.co/cloud. + +===== Storage + +EC2 instances offer a number of different kinds of storage. Please be aware of +the following when selecting the storage for your cluster: + +* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance +Store] is recommended for {es} clusters as it offers excellent performance and +is cheaper than EBS-based storage. {es} is designed to work well with this kind +of ephemeral storage because it replicates each shard across multiple nodes. If +a node fails and its Instance Store is lost then {es} will rebuild any lost +shards from other copies. -===== Instance/Disk -When selecting disk please be aware of the following order of preference: +* https://aws.amazon.com/ebs/[EBS-based storage] may be acceptable +for smaller clusters (1-2 nodes). Be sure to use provisioned IOPS to ensure +your cluster has satisfactory performance.
-* https://aws.amazon.com/efs/[EFS] - Avoid as the sacrifices made to offer durability, shared storage, and grow/shrink come at performance cost, such file systems have been known to cause corruption of indices, and due to Elasticsearch being distributed and having built-in replication, the benefits that EFS offers are not needed. -* https://aws.amazon.com/ebs/[EBS] - Works well if running a small cluster (1-2 nodes) and cannot tolerate the loss all storage backing a node easily or if running indices with no replicas. If EBS is used, then leverage provisioned IOPS to ensure performance. -* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance Store] - When running clusters of larger size and with replicas the ephemeral nature of Instance Store is ideal since Elasticsearch can tolerate the loss of shards. With Instance Store one gets the performance benefit of having disk physically attached to the host running the instance and also the cost benefit of avoiding paying extra for EBS. +* https://aws.amazon.com/efs/[EFS-based storage] is not +recommended or supported as it does not offer satisfactory performance. +Historically, shared network filesystems such as EFS have not always offered +precisely the behaviour that {es} requires of its filesystem, and this has been +known to lead to index corruption. Although EFS offers durability, shared +storage, and the ability to grow and shrink filesystems dynamically, you can +achieve the same benefits using {es} directly. +===== Choice of AMI -Prefer https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as since Elasticsearch runs on the JVM, OS dependencies are very minimal and one can benefit from the lightweight nature, support, and performance tweaks specific to EC2 that the Amazon Linux AMIs offer. +Prefer the https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as these +allow you to benefit from the lightweight nature, support, and EC2-specific +performance enhancements that these images offer. ===== Networking -* Networking throttling takes place on smaller instance types in both the form of https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth and number of connections]. Therefore if large number of connections are needed and networking is becoming a bottleneck, avoid https://aws.amazon.com/ec2/instance-types/[instance types] with networking labeled as `Moderate` or `Low`. -* When running in multiple http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zones] be sure to leverage {ref}/allocation-awareness.html[shard allocation awareness] so that not all copies of shard data reside in the same availability zone. -* Do not span a cluster across regions. If necessary, use a cross cluster search. -===== Misc -* If you have split your nodes into roles, consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the EC2 instances] by role to make it easier to filter and view your EC2 instances in the AWS console. -* Consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling termination protection] for all of your instances to avoid accidentally terminating a node in the cluster and causing a potentially disruptive reallocation. +* Smaller instance types have limited network performance, in terms of both +https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth +and number of connections]. 
If networking is a bottleneck, avoid +https://aws.amazon.com/ec2/instance-types/[instance types] with networking +labelled as `Moderate` or `Low`. + +* It is a good idea to distribute your nodes across multiple +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability +zones] and use {ref}/allocation-awareness.html[shard allocation awareness] to +ensure that each shard has copies in more than one availability zone. + +* Do not span a cluster across regions. {es} expects that node-to-node +connections within a cluster are reasonably reliable and offer high bandwidth +and low latency, and these properties do not hold for connections between +regions. Although an {es} cluster will behave correctly when node-to-node +connections are unreliable or slow, it is not optimised for this case and its +performance may suffer. If you wish to geographically distribute your data, you +should provision multiple clusters and use features such as +{ref}/modules-cross-cluster-search.html[cross-cluster search] and +{stack-ov}/xpack-ccr.html[cross-cluster replication]. + +===== Other recommendations + +* If you have split your nodes into roles, consider +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the +EC2 instances] by role to make it easier to filter and view your EC2 instances +in the AWS console. + +* Consider +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling +termination protection] for all of your data and master-eligible nodes. This +will help to prevent accidental termination of these nodes which could +temporarily reduce the resilience of the cluster and which could cause a +potentially disruptive reallocation of shards. + +* If running your cluster using one or more +https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html[auto-scaling +groups], consider protecting your data and master-eligible nodes +https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection-instance[against +termination during scale-in]. This will help to prevent automatic termination +of these nodes which could temporarily reduce the resilience of the cluster and +which could cause a potentially disruptive reallocation of shards. If these +instances are protected against termination during scale-in then you can use +{ref}/shard-allocation-filtering.html[shard allocation filtering] to gracefully +migrate any data off these nodes before terminating them manually. From 23e4d4606b06a0dd082fbf1c6bfe12282466f187 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 12:00:21 +0300 Subject: [PATCH 185/321] Merge claims from userinfo and ID Token correctly (#42277) Enhance the handling of merging the claims sets of the ID Token and the UserInfo response. JsonObject#merge would throw a runtime exception when attempting to merge two objects with the same key and different values. This could happen for an OP that returns different vales for the same claim in the ID Token and the UserInfo response ( Google does that for profile claim ). If a claim is contained in both sets, we attempt to merge the values if they are objects or arrays, otherwise the ID Token claim value takes presedence and overwrites the userinfo response. 
--- .../oidc/OpenIdConnectAuthenticator.java | 88 ++++++++++- .../oidc/OpenIdConnectAuthenticatorTests.java | 141 +++++++++++++++++- 2 files changed, 226 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 32cffc80071c3..c652a39b90912 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -37,6 +37,7 @@ import com.nimbusds.openid.connect.sdk.token.OIDCTokens; import com.nimbusds.openid.connect.sdk.validators.AccessTokenValidator; import com.nimbusds.openid.connect.sdk.validators.IDTokenValidator; +import net.minidev.json.JSONArray; import net.minidev.json.JSONObject; import org.apache.commons.codec.Charsets; import org.apache.http.Header; @@ -401,15 +402,16 @@ private void handleUserinfoResponse(HttpResponse httpResponse, JWTClaimsSet veri if (httpResponse.getStatusLine().getStatusCode() == 200) { if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/json")) { final JWTClaimsSet userInfoClaims = JWTClaimsSet.parse(contentAsString); + validateUserInfoResponse(userInfoClaims, verifiedIdTokenClaims.getSubject(), claimsListener); if (LOGGER.isTraceEnabled()) { LOGGER.trace("Successfully retrieved user information: [{}]", userInfoClaims.toJSONObject().toJSONString()); } final JSONObject combinedClaims = verifiedIdTokenClaims.toJSONObject(); - combinedClaims.merge(userInfoClaims.toJSONObject()); + mergeObjects(combinedClaims, userInfoClaims.toJSONObject()); claimsListener.onResponse(JWTClaimsSet.parse(combinedClaims)); } else if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/jwt")) { //TODO Handle validating possibly signed responses - claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Signed/encryopted JWTs are" + + claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response. Signed/encrypted JWTs are " + "not currently supported")); } else { claimsListener.onFailure(new IllegalStateException("Unable to parse Userinfo Response.
Content type was expected to " + @@ -435,6 +437,19 @@ private void handleUserinfoResponse(HttpResponse httpResponse, JWTClaimsSet veri } } + /** + * Validates that the userinfo response contains a sub Claim and that this claim value is the same as the one returned in the ID Token + */ + private void validateUserInfoResponse(JWTClaimsSet userInfoClaims, String expectedSub, ActionListener claimsListener) { + if (userInfoClaims.getSubject().isEmpty()) { + claimsListener.onFailure(new ElasticsearchSecurityException("Userinfo Response did not contain a sub Claim")); + } else if (userInfoClaims.getSubject().equals(expectedSub) == false) { + claimsListener.onFailure(new ElasticsearchSecurityException("Userinfo Response is not valid as it is for " + + "subject [{}] while the ID Token was for subject [{}]", userInfoClaims.getSubject(), + expectedSub)); + } + } + /** * Attempts to make a request to the Token Endpoint of the OpenID Connect provider in order to exchange an * authorization code for an Id Token (and potentially an Access Token) @@ -606,6 +621,75 @@ private void setMetadataFileWatcher(String jwkSetPath) throws IOException { watcherService.add(watcher, ResourceWatcherService.Frequency.MEDIUM); } + /** + * Merges the JsonObject with the claims of the ID Token with the JsonObject with the claims of the UserInfo response. This is + * necessary as some OPs return slightly different values for some claims (i.e. Google for the profile picture) and + * {@link JSONObject#merge(Object)} would throw a runtime exception. The merging is performed based on the following rules: + *
<ul> + * <li>If the values for a given claim are primitives (of the same type), the value from the ID Token is retained</li> + * <li>If the values for a given claim are Objects, the values are merged</li> + * <li>If the values for a given claim are Arrays, the values are merged without removing duplicates</li> + * <li>If the values for a given claim are of different types, an exception is thrown</li> + * </ul>
+ * + * @param idToken The JsonObject with the ID Token claims + * @param userInfo The JsonObject with the UserInfo Response claims + * @return the merged JsonObject + */ + // pkg protected for testing + static JSONObject mergeObjects(JSONObject idToken, JSONObject userInfo) { + for (Map.Entry entry : idToken.entrySet()) { + Object value1 = entry.getValue(); + Object value2 = userInfo.get(entry.getKey()); + if (value2 == null) { + continue; + } + if (value1 instanceof JSONArray) { + idToken.put(entry.getKey(), mergeArrays((JSONArray) value1, value2)); + } else if (value1 instanceof JSONObject) { + idToken.put(entry.getKey(), mergeObjects((JSONObject) value1, value2)); + } else if (value1.getClass().equals(value2.getClass()) == false) { + throw new IllegalStateException("Error merging ID token and userinfo claim value for claim [" + entry.getKey() + "]. " + + "Cannot merge [" + value1.getClass().getName() + "] with [" + value2.getClass().getName() + "]"); + } + } + for (Map.Entry entry : userInfo.entrySet()) { + if (idToken.containsKey(entry.getKey()) == false) { + idToken.put(entry.getKey(), entry.getValue()); + } + } + return idToken; + } + + private static JSONObject mergeObjects(JSONObject jsonObject1, Object jsonObject2) { + if (jsonObject2 == null) { + return jsonObject1; + } + if (jsonObject2 instanceof JSONObject) { + return mergeObjects(jsonObject1, (JSONObject) jsonObject2); + } + throw new IllegalStateException("Error while merging ID token and userinfo claims. " + + "Cannot merge JSONObject with [" + jsonObject2.getClass().getName() + "]"); + } + + private static JSONArray mergeArrays(JSONArray jsonArray1, Object jsonArray2) { + if (jsonArray2 == null) { + return jsonArray1; + } + if (jsonArray2 instanceof JSONArray) { + return mergeArrays(jsonArray1, (JSONArray) jsonArray2); + } + if (jsonArray2 instanceof String) { + jsonArray1.add(jsonArray2); + } + return jsonArray1; + } + + private static JSONArray mergeArrays(JSONArray jsonArray1, JSONArray jsonArray2) { + jsonArray1.addAll(jsonArray2); + return jsonArray1; + } + protected void close() { try { this.httpClient.close(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index 64e976d90d1e3..43b58b8d4b521 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -37,6 +37,8 @@ import com.nimbusds.openid.connect.sdk.claims.AccessTokenHash; import com.nimbusds.openid.connect.sdk.validators.IDTokenValidator; import com.nimbusds.openid.connect.sdk.validators.InvalidHashException; +import net.minidev.json.JSONArray; +import net.minidev.json.JSONObject; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; @@ -72,6 +74,7 @@ import java.util.UUID; import static java.time.Instant.now; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -96,7 +99,9 @@ public void setup() { @After public void cleanup() { - authenticator.close(); + if (authenticator != null) { + authenticator.close(); +
} } private OpenIdConnectAuthenticator buildAuthenticator() throws URISyntaxException { @@ -632,6 +637,140 @@ public void testImplicitFlowFailsWithUnsignedJwt() throws Exception { assertThat(e.getCause().getMessage(), containsString("Signed ID token expected")); } + public void testJsonObjectMerging() throws Exception { + final Nonce nonce = new Nonce(); + final String subject = "janedoe"; + final Tuple keyMaterial = getRandomJwkForType(randomFrom("ES", "RS")); + final JWK jwk = keyMaterial.v2().getKeys().get(0); + RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName()); + OpenIdConnectProviderConfiguration opConfig = getOpConfig(); + JSONObject address = new JWTClaimsSet.Builder() + .claim("street_name", "12, Test St.") + .claim("locality", "New York") + .claim("region", "NY") + .claim("country", "USA") + .build() + .toJSONObject(); + JSONObject idTokenObject = new JWTClaimsSet.Builder() + .jwtID(randomAlphaOfLength(8)) + .audience(rpConfig.getClientId().getValue()) + .expirationTime(Date.from(now().plusSeconds(3600))) + .issuer(opConfig.getIssuer().getValue()) + .issueTime(Date.from(now().minusSeconds(200))) + .notBeforeTime(Date.from(now().minusSeconds(200))) + .claim("nonce", nonce) + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .claim("roles", new JSONArray().appendElement("role1").appendElement("role2").appendElement("role3")) + .claim("address", address) + .subject(subject) + .build() + .toJSONObject(); + + JSONObject userinfoObject = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .subject(subject) + .build() + .toJSONObject(); + + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userinfoObject); + assertTrue(idTokenObject.containsKey("given_name")); + assertTrue(idTokenObject.containsKey("family_name")); + assertTrue(idTokenObject.containsKey("profile")); + assertTrue(idTokenObject.containsKey("name")); + assertTrue(idTokenObject.containsKey("email")); + assertTrue(idTokenObject.containsKey("address")); + assertTrue(idTokenObject.containsKey("roles")); + assertTrue(idTokenObject.containsKey("nonce")); + assertTrue(idTokenObject.containsKey("sub")); + assertTrue(idTokenObject.containsKey("jti")); + assertTrue(idTokenObject.containsKey("aud")); + assertTrue(idTokenObject.containsKey("exp")); + assertTrue(idTokenObject.containsKey("iss")); + assertTrue(idTokenObject.containsKey("iat")); + + // Claims with different types throw an error + JSONObject wrongTypeInfo = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", 123334434) + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .subject(subject) + .build() + .toJSONObject(); + + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, wrongTypeInfo); + }); + + // Userinfo Claims do not overwrite ID Token claims + JSONObject overwriteUserInfo = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe2") + .claim("name", "Jane") + .claim("email", "jane.doe@mail.com") +
.subject(subject) + .build() + .toJSONObject(); + + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, overwriteUserInfo); + assertThat(idTokenObject.getAsString("email"), equalTo("jane.doe@example.com")); + assertThat(idTokenObject.getAsString("profile"), equalTo("https://test-profiles.com/jane.doe")); + + // Merging Arrays + JSONObject userInfoWithRoles = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .claim("roles", new JSONArray().appendElement("role4").appendElement("role5")) + .subject(subject) + .build() + .toJSONObject(); + + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userInfoWithRoles); + assertThat((JSONArray) idTokenObject.get("roles"), containsInAnyOrder("role1", "role2", "role3", "role4", "role5")); + + // Merging nested objects + JSONObject addressUserInfo = new JWTClaimsSet.Builder() + .claim("street_name", "12, Test St.") + .claim("locality", "New York") + .claim("postal_code", "10024") + .build() + .toJSONObject(); + JSONObject userInfoWithAddress = new JWTClaimsSet.Builder() + .claim("given_name", "Jane Doe") + .claim("family_name", "Doe") + .claim("profile", "https://test-profiles.com/jane.doe") + .claim("name", "Jane") + .claim("email", "jane.doe@example.com") + .claim("roles", new JSONArray().appendElement("role4").appendElement("role5")) + .claim("address", addressUserInfo) + .subject(subject) + .build() + .toJSONObject(); + OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userInfoWithAddress); + assertTrue(idTokenObject.containsKey("address")); + JSONObject combinedAddress = (JSONObject) idTokenObject.get("address"); + assertTrue(combinedAddress.containsKey("street_name")); + assertTrue(combinedAddress.containsKey("locality")); + assertTrue(combinedAddress.containsKey("postal_code")); + assertTrue(combinedAddress.containsKey("region")); + assertTrue(combinedAddress.containsKey("country")); + } + private OpenIdConnectProviderConfiguration getOpConfig() throws URISyntaxException { return new OpenIdConnectProviderConfiguration( new Issuer("https://op.example.com"), From dcf2929e46dfd4d593f7473f3f764554ed63dca0 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Wed, 22 May 2019 19:10:40 +1000 Subject: [PATCH 186/321] Fix settings prefix for realm truststore password (#42336) As part of #30241 realm settings were changed to be true affix settings. In the process of this change, the "ssl." prefix was lost from the realm truststore password. It should be: xpack.security.authc.realms...ssl.truststore.password Due to a mismatch between the way we define SSL settings and load SSL contexts, there was no way to define this legacy password setting in a realm config.
The settings validation would reject "ssl.truststore.password" but the SSL service would ignore "truststore.password" Resolves: #41663 --- .../core/ssl/SSLConfigurationSettings.java | 2 +- .../ssl/SSLConfigurationSettingsTests.java | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index c16035f1cabe3..ae31966a34712 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -117,7 +117,7 @@ public class SSLConfigurationSettings { public static final Setting LEGACY_TRUSTSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.password", LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); public static final Function> LEGACY_TRUST_STORE_PASSWORD_REALM = realmType -> - Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "truststore.password", + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.truststore.password", LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); public static final Function> TRUSTSTORE_PASSWORD_TEMPLATE = key -> diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java index 072f7d0d57da7..2d98dbb6aadee 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettingsTests.java @@ -5,15 +5,17 @@ */ package org.elasticsearch.xpack.core.ssl; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; - import java.util.Arrays; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; public class SSLConfigurationSettingsTests extends ESTestCase { @@ -91,4 +93,19 @@ public void testEmptySettingsParsesToDefaults() { assertThat(SSLConfigurationSettings.getKeyStoreType(ssl.truststoreType, settings, null), is("jks")); } + public void testRealmSettingPrefixes() { + SSLConfigurationSettings.getRealmSettings("_type").forEach(affix -> { + final String key = affix.getConcreteSettingForNamespace("_name").getKey(); + assertThat(key, startsWith("xpack.security.authc.realms._type._name.ssl.")); + }); + } + + public void testProfileSettingPrefixes() { + SSLConfigurationSettings.getProfileSettings().forEach(affix -> { + assertThat(affix, instanceOf(Setting.AffixSetting.class)); + final String key = ((Setting.AffixSetting) affix).getConcreteSettingForNamespace("_name").getKey(); + assertThat(key, startsWith("transport.profiles._name.xpack.security.ssl.")); + }); + } + } From 5fb55f62be68add6530cc883e317cb764bf4ad1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 22 May 2019 05:15:54 -0400 Subject: [PATCH 187/321] Remove type-related methods from QueryBuilders (#42284) Removes all deprecated type-related methods from the QueryBuilders helper class and from tests using them. 
Also removing related docs tests and doc pages referring to the `type` query. All removed methods have been deprecated since version 7.0. --- .../QueryDSLDocumentationTests.java | 7 --- .../query-dsl/term-level-queries.asciidoc | 6 -- docs/java-api/query-dsl/type-query.asciidoc | 15 ----- .../high-level/query-builders.asciidoc | 1 - .../query-dsl/term-level-queries.asciidoc | 8 +-- docs/reference/query-dsl/type-query.asciidoc | 19 ------ docs/reference/redirects.asciidoc | 9 +-- .../join/query/ChildQuerySearchIT.java | 2 +- .../http/ContextAndHeaderTransportIT.java | 2 +- .../index/query/QueryBuilders.java | 63 +------------------ .../highlight/HighlighterSearchIT.java | 7 ++- .../search/geo/GeoShapeIntegrationIT.java | 2 +- .../search/geo/GeoShapeQueryTests.java | 5 +- .../geo/LegacyGeoShapeIntegrationIT.java | 2 +- .../search/query/SearchQueryIT.java | 17 ++--- 15 files changed, 21 insertions(+), 144 deletions(-) delete mode 100644 docs/java-api/query-dsl/type-query.asciidoc delete mode 100644 docs/reference/query-dsl/type-query.asciidoc diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java index cfe9e98f643e6..51670b29de1b6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java @@ -74,7 +74,6 @@ import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; -import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction; @@ -433,12 +432,6 @@ public void testTerms() { // end::terms } - public void testType() { - // tag::type - typeQuery("my_type"); // <1> - // end::type - } - public void testWildcard() { // tag::wildcard wildcardQuery( diff --git a/docs/java-api/query-dsl/term-level-queries.asciidoc b/docs/java-api/query-dsl/term-level-queries.asciidoc index e7d5ad4e52b74..7d3649e372bbd 100644 --- a/docs/java-api/query-dsl/term-level-queries.asciidoc +++ b/docs/java-api/query-dsl/term-level-queries.asciidoc @@ -53,10 +53,6 @@ The queries in this group are: http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] of 1 or 2. -<>:: - - Find documents of the specified type. - <>:: Find documents with the specified type and IDs. @@ -78,6 +74,4 @@ include::regexp-query.asciidoc[] include::fuzzy-query.asciidoc[] -include::type-query.asciidoc[] - include::ids-query.asciidoc[] diff --git a/docs/java-api/query-dsl/type-query.asciidoc b/docs/java-api/query-dsl/type-query.asciidoc deleted file mode 100644 index 160deedb9eaca..0000000000000 --- a/docs/java-api/query-dsl/type-query.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -[[java-query-dsl-type-query]] -==== Type Query - -deprecated[7.0.0] - -Types are being removed, prefer filtering on a field instead. For -more information, see {ref}/removal-of-types.html[Removal of mapping types].
- -See {ref}/query-dsl-type-query.html[Type Query] - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{query-dsl-test}[type] --------------------------------------------------- -<1> type diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 32a3b06505b1d..53d9b9af97d12 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -40,7 +40,6 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-wildcard-query.html[Wildcard] | {query-ref}/WildcardQueryBuilder.html[WildcardQueryBuilder] | {query-ref}/QueryBuilders.html#wildcardQuery-java.lang.String-java.lang.String-[QueryBuilders.wildcardQuery()] | {ref}/query-dsl-regexp-query.html[Regexp] | {query-ref}/RegexpQueryBuilder.html[RegexpQueryBuilder] | {query-ref}/QueryBuilders.html#regexpQuery-java.lang.String-java.lang.String-[QueryBuilders.regexpQuery()] | {ref}/query-dsl-fuzzy-query.html[Fuzzy] | {query-ref}/FuzzyQueryBuilder.html[FuzzyQueryBuilder] | {query-ref}/QueryBuilders.html#fuzzyQuery-java.lang.String-java.lang.String-[QueryBuilders.fuzzyQuery()] -| {ref}/query-dsl-type-query.html[Type] | {query-ref}/TypeQueryBuilder.html[TypeQueryBuilder] | {query-ref}/QueryBuilders.html#typeQuery-java.lang.String-[QueryBuilders.typeQuery()] | {ref}/query-dsl-ids-query.html[Ids] | {query-ref}/IdsQueryBuilder.html[IdsQueryBuilder] | {query-ref}/QueryBuilders.html#idsQuery--[QueryBuilders.idsQuery()] |====== diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index f4e185ba9597a..dd7ea38819f01 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -60,13 +60,9 @@ The queries in this group are: http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] of 1 or 2. -<>:: - - Find documents of the specified type. - <>:: - Find documents with the specified type and IDs. + Find documents with the specified IDs. include::term-query.asciidoc[] @@ -87,6 +83,4 @@ include::regexp-query.asciidoc[] include::fuzzy-query.asciidoc[] -include::type-query.asciidoc[] - include::ids-query.asciidoc[] diff --git a/docs/reference/query-dsl/type-query.asciidoc b/docs/reference/query-dsl/type-query.asciidoc deleted file mode 100644 index 4364d1e14e90d..0000000000000 --- a/docs/reference/query-dsl/type-query.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[query-dsl-type-query]] -=== Type Query - -deprecated[7.0.0,Types and the `type` query are deprecated and in the process of being removed. See <>.] - -Filters documents matching the provided document / mapping type. - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "type" : { - "value" : "_doc" - } - } -} --------------------------------------------------- -// CONSOLE diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 1c6850542a971..b5f0e08a45232 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -248,13 +248,6 @@ The `terms` filter has been replaced by the <>. It behave as a query in ``query context'' and as a filter in ``filter context'' (see <>). -[role="exclude",id="query-dsl-type-filter"] -=== Type Filter - -The `type` filter has been replaced by the <>. 
It behaves -as a query in ``query context'' and as a filter in ``filter context'' (see -<>). - [role="exclude",id="query-dsl-flt-query"] === Fuzzy Like This Query @@ -601,4 +594,4 @@ See <>. [role="exclude",id="_faster_prefix_queries_with_literal_index_prefixes_literal.html"] -See <>. \ No newline at end of file +See <>. diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index bc825cfb381ba..f3ef60ea215fe 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -179,7 +179,7 @@ public void testSimpleChildQuery() throws Exception { // TEST FETCHING _parent from child SearchResponse searchResponse; searchResponse = client().prepareSearch("test") - .setQuery(idsQuery("doc").addIds("c1")).get(); + .setQuery(idsQuery().addIds("c1")).get(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 04f01cf0f0e4c..47cce87c4b959 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -177,7 +177,7 @@ public void testThatGeoShapeQueryGetRequestContainsContextAndHeaders() throws Ex .get(); transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get(); - GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1", "type") + GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1") .indexedShapeIndex(lookupIndex) .indexedShapePath("location"); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 5ac70781286a4..30284703e8d6b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -117,18 +117,6 @@ public static IdsQueryBuilder idsQuery() { return new IdsQueryBuilder(); } - /** - * Constructs a query that will match only specific ids within types. - * - * @param types The mapping/doc type - * - * @deprecated Types are in the process of being removed, use {@link #idsQuery()} instead. - */ - @Deprecated - public static IdsQueryBuilder idsQuery(String... types) { - return new IdsQueryBuilder().types(types); - } - /** * A Query that matches documents containing a term. 
* @@ -426,7 +414,7 @@ public static FunctionScoreQueryBuilder functionScoreQuery(FunctionScoreQueryBui * * @param function The function builder used to custom score */ - public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder function) { + public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder function) { return new FunctionScoreQueryBuilder(function); } @@ -436,7 +424,7 @@ public static FunctionScoreQueryBuilder functionScoreQuery(ScoreFunctionBuilder * @param queryBuilder The query to custom score * @param function The function builder used to custom score */ - public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, ScoreFunctionBuilder function) { + public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, ScoreFunctionBuilder function) { return (new FunctionScoreQueryBuilder(queryBuilder, function)); } @@ -586,15 +574,6 @@ public static WrapperQueryBuilder wrapperQuery(byte[] source) { return new WrapperQueryBuilder(source); } - /** - * A filter based on doc/mapping type. - * @deprecated Types are going away, prefer filtering on a field. - */ - @Deprecated - public static TypeQueryBuilder typeQuery(String type) { - return new TypeQueryBuilder(type); - } - /** * A terms query that can extract the terms from another doc in an index. */ @@ -653,14 +632,6 @@ public static GeoShapeQueryBuilder geoShapeQuery(String name, String indexedShap return new GeoShapeQueryBuilder(name, indexedShapeId); } - /** - * @deprecated Types are in the process of being removed, use {@link #geoShapeQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoShapeQuery(String name, String indexedShapeId, String indexedShapeType) { - return new GeoShapeQueryBuilder(name, indexedShapeId, indexedShapeType); - } - /** * A filter to filter indexed shapes intersecting with shapes * @@ -679,16 +650,6 @@ public static GeoShapeQueryBuilder geoIntersectionQuery(String name, String inde return builder; } - /** - * @deprecated Types are in the process of being removed, use {@link #geoIntersectionQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoIntersectionQuery(String name, String indexedShapeId, String indexedShapeType) { - GeoShapeQueryBuilder builder = geoShapeQuery(name, indexedShapeId, indexedShapeType); - builder.relation(ShapeRelation.INTERSECTS); - return builder; - } - /** * A filter to filter indexed shapes that are contained by a shape * @@ -707,16 +668,6 @@ public static GeoShapeQueryBuilder geoWithinQuery(String name, String indexedSha return builder; } - /** - * @deprecated Types are in the process of being removed, use {@link #geoWithinQuery(String, String)} instead. - */ - @Deprecated - public static GeoShapeQueryBuilder geoWithinQuery(String name, String indexedShapeId, String indexedShapeType) { - GeoShapeQueryBuilder builder = geoShapeQuery(name, indexedShapeId, indexedShapeType); - builder.relation(ShapeRelation.WITHIN); - return builder; - } - /** * A filter to filter indexed shapes that are not intersection with the query shape * @@ -735,16 +686,6 @@ public static GeoShapeQueryBuilder geoDisjointQuery(String name, String indexedS return builder; } - /** - * @deprecated Types are in the process of being removed, use {@link #geoDisjointQuery(String, String)} instead. 
- */ - @Deprecated - public static GeoShapeQueryBuilder geoDisjointQuery(String name, String indexedShapeId, String indexedShapeType) { - GeoShapeQueryBuilder builder = geoShapeQuery(name, indexedShapeId, indexedShapeType); - builder.relation(ShapeRelation.DISJOINT); - return builder; - } - /** * A filter to filter only documents where a field exists in them. * diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 1467fd1f0971e..3c21085fc905d 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -1801,7 +1802,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { index("test", "type1", "2", "text", new String[] {"", text2}); refresh(); - IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2"); + IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery().addIds("2"); field.highlighterType("plain"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) @@ -1824,7 +1825,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { // But if the field was actually empty then you should get no highlighting field index("test", "type1", "3", "text", new String[] {}); refresh(); - idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3"); + idsQueryBuilder = QueryBuilders.idsQuery().addIds("3"); field.highlighterType("plain"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) @@ -1847,7 +1848,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { index("test", "type1", "4"); refresh(); - idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4"); + idsQueryBuilder = QueryBuilders.idsQuery().addIds("4"); field.highlighterType("plain"); response = client().prepareSearch("test") .setQuery(idsQueryBuilder) diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java index a7faa04017258..e3054cb1f6b0c 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java @@ -153,7 +153,7 @@ public void testIndexShapeRouting() throws Exception { indexRandom(true, client().prepareIndex("test", "doc", "0").setSource(source, XContentType.JSON).setRouting("ABC")); SearchResponse searchResponse = client().prepareSearch("test").setQuery( - geoShapeQuery("shape", "0", "doc").indexedShapeIndex("test").indexedShapeRouting("ABC") + geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 3d1d5b6876a65..ef6bea10d749d 100644 --- 
a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.lucene.geo.GeoTestUtil; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; @@ -233,7 +234,7 @@ public void testIndexedShapeReferenceWithTypes() throws Exception { .endObject()).setRefreshPolicy(IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(geoIntersectionQuery("location", "Big_Rectangle", "shape_type")) + .setQuery(geoIntersectionQuery("location", "Big_Rectangle")) .get(); assertSearchResponse(searchResponse); @@ -242,7 +243,7 @@ public void testIndexedShapeReferenceWithTypes() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1")); searchResponse = client().prepareSearch("test") - .setQuery(geoShapeQuery("location", "Big_Rectangle", "shape_type")) + .setQuery(geoShapeQuery("location", "Big_Rectangle")) .get(); assertSearchResponse(searchResponse); diff --git a/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java index 574bdd46bba5b..8ddfbb2793024 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java @@ -155,7 +155,7 @@ public void testIndexShapeRouting() throws Exception { indexRandom(true, client().prepareIndex("test", "doc", "0").setSource(source, XContentType.JSON).setRouting("ABC")); SearchResponse searchResponse = client().prepareSearch("test").setQuery( - geoShapeQuery("shape", "0", "doc").indexedShapeIndex("test").indexedShapeRouting("ABC") + geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC") ).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 5dc3874bcfa6d..7e233b863076a 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.bootstrap.JavaVersion; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -92,7 +91,6 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termsLookupQuery; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; -import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -486,9 +484,6 @@ public void testTypeFilter() throws Exception { indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"), 
client().prepareIndex("test", "type1", "2").setSource("field1", "value1")); - assertHitCount(client().prepareSearch().setQuery(typeQuery("type1")).get(), 2L); - assertHitCount(client().prepareSearch().setQuery(typeQuery("type2")).get(), 0L); - assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2L); assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 0L); @@ -502,7 +497,7 @@ public void testIdsQueryTestsIdIndexed() throws Exception { client().prepareIndex("test", "type1", "2").setSource("field1", "value2"), client().prepareIndex("test", "type1", "3").setSource("field1", "value3")); - SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1").addIds("1", "3"))).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().addIds("1", "3"))).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); @@ -511,7 +506,7 @@ assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("1", "3")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "3")).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); @@ -520,7 +515,7 @@ assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "3"); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("7", "10")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("7", "10")).get(); assertHitCount(searchResponse, 0L); // repeat..., with terms @@ -1156,7 +1151,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test", "_doc", "3").setSource("field1", "value3").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("_doc").addIds("1", "2")).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1168,11 +1163,11 @@ assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - searchResponse = client().prepareSearch().setQuery(idsQuery(Strings.EMPTY_ARRAY).addIds("1")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1")).get(); assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "_doc").addIds("1", "2", "3", "4")).get(); + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2", "3", "4")).get(); assertHitCount(searchResponse, 3L); assertThat(searchResponse.getHits().getHits().length, equalTo(3)); } From 96094974e28da1c480ca4b7fb4ad40ef8ba1654e Mon Sep 17 00:00:00 2001 From: Nikita Glashenko Date: Wed, 22 May 2019 13:49:27 +0400 Subject: [PATCH 188/321] Fix TopHitsAggregationBuilder adding duplicate _score sort clauses (#42179) When using the High Level REST Client Java API to build a search query, calling AggregationBuilders.topHits("th").sort("_score", SortOrder.DESC) caused the query to contain duplicate sort clauses.
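For context, a minimal sketch of the reported behaviour (the builder name "th" and the rendered JSON shown in the comment are illustrative, not taken from this patch):

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder;
import org.elasticsearch.search.sort.SortOrder;

public class TopHitsDuplicateSortRepro {
    public static void main(String[] args) {
        // Sorting a top_hits aggregation by "_score" through the String overload.
        TopHitsAggregationBuilder topHits = AggregationBuilders.topHits("th")
                .sort("_score", SortOrder.DESC);
        // Before this fix, sort(String, SortOrder) added a scoreSort and then
        // unconditionally fell through to an additional fieldSort on "_score",
        // so the serialized aggregation carried two sort clauses, roughly:
        //   "sort" : [ { "_score" : { "order" : "desc" } },
        //              { "_score" : { "order" : "desc" } } ]
        // With the change below, only the single scoreSort clause remains.
        System.out.println(topHits);
    }
}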
--- .../aggregations/metrics/TopHitsAggregationBuilder.java | 6 ++++-- .../aggregations/metrics/TopHitsAggregatorTests.java | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index 43bde648657ee..019fec82d0df4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -232,8 +232,9 @@ public TopHitsAggregationBuilder sort(String name, SortOrder order) { } if (name.equals(ScoreSortBuilder.NAME)) { sort(SortBuilders.scoreSort().order(order)); + } else { + sort(SortBuilders.fieldSort(name).order(order)); } - sort(SortBuilders.fieldSort(name).order(order)); return this; } @@ -249,8 +250,9 @@ public TopHitsAggregationBuilder sort(String name) { } if (name.equals(ScoreSortBuilder.NAME)) { sort(SortBuilders.scoreSort()); + } else { + sort(SortBuilders.fieldSort(name)); } - sort(SortBuilders.fieldSort(name)); return this; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 585cd7f9ff434..b087909757335 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -49,7 +49,6 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.TopHits; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.sort.SortOrder; @@ -207,4 +206,10 @@ public void testSetScorer() throws Exception { reader.close(); directory.close(); } + + public void testSortByScore() throws Exception { + // just check that it does not fail with exceptions + testCase(new MatchAllDocsQuery(), topHits("_name").sort("_score", SortOrder.DESC)); + testCase(new MatchAllDocsQuery(), topHits("_name").sort("_score")); + } } From d22844208b228f7f3240d7dfcd44d726c2366624 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 22 May 2019 11:54:28 +0200 Subject: [PATCH 189/321] Remove IndexShard dependency from Repository (#42213) * Remove IndexShard dependency from Repository In order to simplify repository testing, especially for BlobStoreRepository, it is important to remove the dependency on IndexShard and reduce it to Store and MapperService (in the snapshot case). This significantly reduces the dependency footprint for Repository and allows unit testing without starting nodes or instantiating entire shard instances. This change deprecates the old method signatures and adds a unit test for FsRepository to show the advantage of this change. In addition, the unit testing surfaced a bug where the internal file names that are private to the repository were used in the recovery stats instead of the target file names, which made it impossible to relate to the actual Lucene files in the recovery stats.
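For reference, an abridged sketch of the resulting Repository surface, condensed from the diff below (javadoc and the deprecated restoreShard bridge are elided; see the full diff for the authoritative signatures):

// New primary entry points: a Repository implementation now only needs the
// Store (plus the MapperService for snapshots), not a full IndexShard.
void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId,
                   IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus);

void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId,
                  ShardId snapshotShardId, RecoveryState recoveryState);

// Deprecated default kept for existing callers; it simply unwraps the shard.
@Deprecated
default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId,
                           IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
    snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId,
                  snapshotIndexCommit, snapshotStatus);
}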
* don't delegate deprecated methods * apply comments * test --- .../index/shard/StoreRecovery.java | 3 +- .../repositories/FilterRepository.java | 15 +- .../repositories/Repository.java | 52 ++++- .../blobstore/BlobStoreRepository.java | 25 +-- .../blobstore/FileRestoreContext.java | 23 +- .../snapshots/SnapshotShardsService.java | 3 +- .../index/shard/IndexShardTests.java | 4 +- .../RepositoriesServiceTests.java | 10 +- .../repositories/fs/FsRepositoryTests.java | 201 ++++++++++++++++++ .../index/shard/IndexShardTestCase.java | 13 +- .../index/shard/RestoreOnlyRepository.java | 5 +- .../xpack/ccr/repository/CcrRepository.java | 57 +++-- .../ShardFollowTaskReplicationTests.java | 4 +- .../engine/FollowEngineIndexShardTests.java | 4 +- .../SourceOnlySnapshotRepository.java | 24 ++- .../SourceOnlySnapshotShardTests.java | 17 +- 16 files changed, 353 insertions(+), 107 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 0e87b9e2357e5..aa49f7ecb60ce 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -468,7 +468,8 @@ private void restore(final IndexShard indexShard, final Repository repository, f snapshotShardId = new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, shardId.id()); } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); - repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), + assert indexShard.getEngineOrNull() == null; + repository.restoreShard(indexShard, indexShard.store(), restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); store.bootstrapNewHistory(); diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index afc38bda86c5b..1fa42579617e1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -27,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -119,16 +119,17 @@ public boolean isReadOnly() { return in.isReadOnly(); } + @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - in.snapshotShard(shard, store, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); } @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId 
indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { - in.restoreShard(shard, snapshotId, version, indexId, snapshotShardId, recoveryState); + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + in.restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 20f7c42cb21dd..3aa19cb130cae 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -49,7 +50,7 @@ *
 * <ul>
 * <li>Master calls {@link #initializeSnapshot(SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)}
 * with list of indices that will be included into the snapshot</li>
- * <li>Data nodes call {@link Repository#snapshotShard(IndexShard, Store, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)}
+ * <li>Data nodes call {@link Repository#snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)}
 * for each shard</li>
 * <li>When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures</li>
 * </ul>
@@ -196,30 +197,69 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long
     * <p>
* As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. - * @param shard shard to be snapshotted + * @param indexShard the shard to be snapshotted + * @param snapshotId snapshot id + * @param indexId id for the index being snapshotted + * @param snapshotIndexCommit commit point + * @param snapshotStatus snapshot status + * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead + */ + @Deprecated + default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + } + + /** + * Creates a snapshot of the shard based on the index commit point. + *
+ * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method. + * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. + *
+ * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check + * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. * @param store store to be snapshotted + * @param mapperService the shards mapper service * @param snapshotId snapshot id * @param indexId id for the index being snapshotted * @param snapshotIndexCommit commit point * @param snapshotStatus snapshot status */ - void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus); /** * Restores snapshot of the shard. *
* The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. - * * @param shard the shard to restore the index into + * @param store the store to restore the index into + * @param snapshotId snapshot id + * @param version version of elasticsearch that created this snapshot + * @param indexId id of the index in the repository from which the restore is occurring + * @param snapshotShardId shard id (in the snapshot) + * @param recoveryState recovery state + * @deprecated use {@link #restoreShard(Store, SnapshotId, Version, IndexId, ShardId, RecoveryState)} instead + */ + @Deprecated + default void restoreShard(IndexShard shard, Store store, SnapshotId snapshotId, Version version, IndexId indexId, + ShardId snapshotShardId, RecoveryState recoveryState) { + restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); + } + + /** + * Restores snapshot of the shard. + *
+ * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. + * @param store the store to restore the index into * @param snapshotId snapshot id * @param version version of elasticsearch that created this snapshot * @param indexId id of the index in the repository from which the restore is occurring * @param snapshotShardId shard id (in the snapshot) * @param recoveryState recovery state */ - void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, - ShardId snapshotShardId, RecoveryState recoveryState); + void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState); /** * Retrieve shard snapshot status for the stored snapshot diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 320b7ff2d5550..86409ebac7d31 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -71,7 +71,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotException; @@ -793,8 +793,8 @@ private void writeAtomic(final String blobName, final BytesReference bytesRef, b } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { SnapshotContext snapshotContext = new SnapshotContext(store, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); try { snapshotContext.snapshot(snapshotIndexCommit); @@ -809,18 +809,19 @@ public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, } @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { - final Context context = new Context(snapshotId, indexId, shard.shardId(), snapshotShardId); + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + ShardId shardId = store.shardId(); + final Context context = new Context(snapshotId, indexId, shardId, snapshotShardId); BlobPath path = basePath().add("indices").add(indexId.getId()).add(Integer.toString(snapshotShardId.getId())); BlobContainer blobContainer = blobStore().blobContainer(path); - final RestoreContext snapshotContext = new RestoreContext(shard, snapshotId, recoveryState, blobContainer); + final RestoreContext snapshotContext = new RestoreContext(shardId, snapshotId, recoveryState, blobContainer); try { BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), 
snapshot.indexFiles()); - snapshotContext.restore(snapshotFiles); + snapshotContext.restore(snapshotFiles, store); } catch (Exception e) { - throw new IndexShardRestoreFailedException(shard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e); } } @@ -1366,13 +1367,13 @@ private class RestoreContext extends FileRestoreContext { /** * Constructs new restore context - * @param indexShard shard to restore into + * @param shardId shard id to restore into * @param snapshotId snapshot id * @param recoveryState recovery state to report progress * @param blobContainer the blob container to read the files from */ - RestoreContext(IndexShard indexShard, SnapshotId snapshotId, RecoveryState recoveryState, BlobContainer blobContainer) { - super(metadata.name(), indexShard, snapshotId, recoveryState, BUFFER_SIZE); + RestoreContext(ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState, BlobContainer blobContainer) { + super(metadata.name(), shardId, snapshotId, recoveryState, BUFFER_SIZE); this.blobContainer = blobContainer; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index 1e0ab2dd8beee..f78ddab9ee44c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -31,7 +31,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.iterable.Iterables; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -64,7 +63,6 @@ public abstract class FileRestoreContext { protected static final Logger logger = LogManager.getLogger(FileRestoreContext.class); protected final String repositoryName; - protected final IndexShard indexShard; protected final RecoveryState recoveryState; protected final SnapshotId snapshotId; protected final ShardId shardId; @@ -73,26 +71,24 @@ public abstract class FileRestoreContext { /** * Constructs new restore context * - * @param indexShard shard to restore into + * @param shardId shard id to restore into * @param snapshotId snapshot id * @param recoveryState recovery state to report progress * @param bufferSize buffer size for restore */ - protected FileRestoreContext(String repositoryName, IndexShard indexShard, SnapshotId snapshotId, RecoveryState recoveryState, + protected FileRestoreContext(String repositoryName, ShardId shardId, SnapshotId snapshotId, RecoveryState recoveryState, int bufferSize) { this.repositoryName = repositoryName; this.recoveryState = recoveryState; - this.indexShard = indexShard; this.snapshotId = snapshotId; - this.shardId = indexShard.shardId(); + this.shardId = shardId; this.bufferSize = bufferSize; } /** * Performs restore operation */ - public void restore(SnapshotFiles snapshotFiles) throws IOException { - final Store store = indexShard.store(); + public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException { store.incRef(); try { logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId); @@ -108,7 +104,7 @@ public void 
restore(SnapshotFiles snapshotFiles) throws IOException { // version number and no checksum, even though the index itself is perfectly fine to restore, this // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. - store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); + store.createEmpty(store.indexSettings().getIndexVersionCreated().luceneVersion); return; } @@ -117,7 +113,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { // this will throw an IOException if the store has no segments infos file. The // store can still have existing files but they will be deleted just before being // restored. - recoveryTargetMetadata = indexShard.snapshotStoreMetadata(); + recoveryTargetMetadata = store.getMetadata(null, true); } catch (org.apache.lucene.index.IndexNotFoundException e) { // happens when restore to an empty shard, not a big deal logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId); @@ -127,7 +123,6 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { shardId, snapshotId), e); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } - final List filesToRecover = new ArrayList<>(); final Map snapshotMetaData = new HashMap<>(); final Map fileInfos = new HashMap<>(); @@ -157,7 +152,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { final Store.RecoveryDiff diff = sourceMetaData.recoveryDiff(recoveryTargetMetadata); for (StoreFileMetaData md : diff.identical) { BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name()); - recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), true); + recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), true); if (logger.isTraceEnabled()) { logger.trace("[{}] [{}] not_recovering file [{}] from [{}], exists in local store and is same", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); @@ -167,7 +162,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException { for (StoreFileMetaData md : concat(diff)) { BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name()); filesToRecover.add(fileInfo); - recoveryState.getIndex().addFileDetail(fileInfo.name(), fileInfo.length(), false); + recoveryState.getIndex().addFileDetail(fileInfo.physicalName(), fileInfo.length(), false); if (logger.isTraceEnabled()) { logger.trace("[{}] [{}] recovering [{}] from [{}]", shardId, snapshotId, fileInfo.physicalName(), fileInfo.name()); @@ -260,7 +255,7 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi int length; while ((length = stream.read(buffer)) > 0) { indexOutput.writeBytes(buffer, 0, length); - recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length); + recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length); } Store.verify(indexOutput); indexOutput.close(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index a0c5ea9392c67..f79b6da6ef626 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -367,8 +367,7 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina try { // we flush first to 
make sure we get the latest writes snapshotted try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { - repository.snapshotShard(indexShard, indexShard.store(), snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), - snapshotStatus); + repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 04ef68852cc3f..1710154f72f94 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2300,8 +2300,8 @@ public void testRestoreShard() throws IOException { target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 505c0628d6aba..ae703795ec622 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -200,14 +200,14 @@ public boolean isReadOnly() { } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit + snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { } @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { } diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java new file mode 100644 index 0000000000000..ec8a444d84fae --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -0,0 
+1,201 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.fs; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.CodecReader; +import org.apache.lucene.index.FilterMergePolicy; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.IOSupplier; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; + +public class FsRepositoryTests extends ESTestCase { + + public void testSnapshotAndRestore() throws IOException, InterruptedException { + ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName()); + try (Directory directory = newDirectory()) { + 
Path repo = createTempDir(); + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES).build(); + + int numDocs = indexDocs(directory); + RepositoryMetaData metaData = new RepositoryMetaData("test", "fs", settings); + FsRepository repository = new FsRepository(metaData, new Environment(settings, null), NamedXContentRegistry.EMPTY, threadPool); + repository.start(); + final Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "myindexUUID").build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("myindex", indexSettings); + ShardId shardId = new ShardId(idxSettings.getIndex(), 1); + Store store = new Store(shardId, idxSettings, directory, new DummyShardLock(shardId)); + SnapshotId snapshotId = new SnapshotId("test", "test"); + IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID()); + + IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); + runGeneric(threadPool, () -> { + IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); + repository.snapshotShard(store, null, snapshotId, indexId, indexCommit, + snapshotStatus); + IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); + assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); + }); + Lucene.cleanLuceneIndex(directory); + expectThrows(org.apache.lucene.index.IndexNotFoundException.class, () -> Lucene.readSegmentInfos(directory)); + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + ShardRouting routing = ShardRouting.newUnassigned(shardId, true, new RecoverySource.SnapshotRecoverySource("test", + new Snapshot("foo", snapshotId), Version.CURRENT, "myindex"), + new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, "")); + routing = ShardRoutingHelper.initialize(routing, localNode.getId(), 0); + RecoveryState state = new RecoveryState(routing, localNode, null); + runGeneric(threadPool, () -> + repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, state)); + assertTrue(state.getIndex().recoveredBytes() > 0); + assertEquals(0, state.getIndex().reusedFileCount()); + assertEquals(indexCommit.getFileNames().size(), state.getIndex().recoveredFileCount()); + assertEquals(numDocs, Lucene.readSegmentInfos(directory).totalMaxDoc()); + deleteRandomDoc(store.directory()); + SnapshotId incSnapshotId = new SnapshotId("test1", "test1"); + IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); + Collection commitFileNames = incIndexCommit.getFileNames(); + runGeneric(threadPool, () -> { + IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); + repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus); + IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); + assertEquals(2, copy.getIncrementalFileCount()); + assertEquals(commitFileNames.size(), copy.getTotalFileCount()); + }); + + // roll back to the first snap and then incrementally restore + RecoveryState firstState = new RecoveryState(routing, 
localNode, null); + runGeneric(threadPool, () -> + repository.restoreShard(store, snapshotId, Version.CURRENT, indexId, shardId, firstState)); + assertEquals("should reuse everything except of .liv and .si", + commitFileNames.size()-2, firstState.getIndex().reusedFileCount()); + + RecoveryState secondState = new RecoveryState(routing, localNode, null); + runGeneric(threadPool, () -> + repository.restoreShard(store, incSnapshotId, Version.CURRENT, indexId, shardId, secondState)); + assertEquals(secondState.getIndex().reusedFileCount(), commitFileNames.size()-2); + assertEquals(secondState.getIndex().recoveredFileCount(), 2); + List recoveredFiles = + secondState.getIndex().fileDetails().stream().filter(f -> f.reused() == false).collect(Collectors.toList()); + Collections.sort(recoveredFiles, Comparator.comparing(RecoveryState.File::name)); + assertTrue(recoveredFiles.get(0).name(), recoveredFiles.get(0).name().endsWith(".liv")); + assertTrue(recoveredFiles.get(1).name(), recoveredFiles.get(1).name().endsWith("segments_2")); + } finally { + terminate(threadPool); + } + } + + private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException { + CountDownLatch latch = new CountDownLatch(1); + threadPool.generic().submit(() -> { + try { + runnable.run(); + } finally { + latch.countDown(); + } + }); + latch.await(); + } + + private void deleteRandomDoc(Directory directory) throws IOException { + try(IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(random(), + new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()).setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) { + @Override + public boolean keepFullyDeletedSegment(IOSupplier readerIOSupplier) { + return true; + } + + }))) { + final int numDocs = writer.getDocStats().numDocs; + writer.deleteDocuments(new Term("id", "" + randomIntBetween(0, writer.getDocStats().numDocs-1))); + writer.commit(); + assertEquals(writer.getDocStats().numDocs, numDocs-1); + } + } + + private int indexDocs(Directory directory) throws IOException { + try(IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(random(), + new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()))) { + int docs = 1 + random().nextInt(100); + for (int i = 0; i < docs; i++) { + Document doc = new Document(); + doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? 
Field.Store.YES : Field.Store.NO)); + doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); + writer.addDocument(doc); + } + writer.commit(); + return docs; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 6175a22760029..2a2176f1c100d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -797,7 +797,7 @@ protected void flushShard(IndexShard shard, boolean force) { /** Recover a shard from a snapshot using a given repository **/ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot snapshot, - final Repository repository) throws IOException { + final Repository repository) { final Version version = Version.CURRENT; final ShardId shardId = shard.shardId(); final String index = shardId.getIndexName(); @@ -806,9 +806,12 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, version, index); final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource); - shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null)); - repository.restoreShard(shard, snapshot.getSnapshotId(), version, indexId, shard.shardId(), shard.recoveryState()); + repository.restoreShard(shard.store(), + snapshot.getSnapshotId(), version, + indexId, + shard.shardId(), + shard.recoveryState()); } /** Snapshot a shard using a given repository **/ @@ -820,8 +823,8 @@ protected void snapshotShard(final IndexShard shard, Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); - repository.snapshotShard(shard, shard.store(), snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(), - snapshotStatus); + repository.snapshotShard(shard.store(), shard.mapperService(), snapshot.getSnapshotId(), indexId, + indexCommitRef.getIndexCommit(), snapshotStatus); } final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index bc60b4c194622..2279b48c3c023 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.repositories.IndexId; @@ -133,8 +134,8 @@ public boolean isReadOnly() { } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId 
indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 5a0472339c192..3010f90b803e9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -42,10 +42,10 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRecoveryException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; @@ -294,18 +294,19 @@ public boolean isReadOnly() { } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } @Override - public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { // TODO: Add timeouts to network calls / the restore process. 
- createEmptyStore(indexShard, shardId); + createEmptyStore(store); + ShardId shardId = store.shardId(); - final Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); + final Map ccrMetaData = store.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); final String leaderIndexName = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); final String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); final Index leaderIndex = new Index(leaderIndexName, leaderUUID); @@ -314,14 +315,14 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v final Client remoteClient = getRemoteClusterClient(); final String retentionLeaseId = - retentionLeaseId(localClusterName, indexShard.shardId().getIndex(), remoteClusterAlias, leaderIndex); + retentionLeaseId(localClusterName, shardId.getIndex(), remoteClusterAlias, leaderIndex); acquireRetentionLeaseOnLeader(shardId, retentionLeaseId, leaderShardId, remoteClient); // schedule renewals to run during the restore final Scheduler.Cancellable renewable = threadPool.scheduleWithFixedDelay( () -> { - logger.trace("{} background renewal of retention lease [{}] during restore", indexShard.shardId(), retentionLeaseId); + logger.trace("{} background renewal of retention lease [{}] during restore", shardId, retentionLeaseId); final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { // we have to execute under the system context so that if security is enabled the renewal is authorized @@ -336,36 +337,34 @@ public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version v e -> { assert e instanceof ElasticsearchSecurityException == false : e; logger.warn(new ParameterizedMessage( - "{} background renewal of retention lease [{}] failed during restore", - indexShard.shardId(), - retentionLeaseId), - e); + "{} background renewal of retention lease [{}] failed during restore", shardId, + retentionLeaseId), e); })); } }, - CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(indexShard.indexSettings().getNodeSettings()), + CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(store.indexSettings().getNodeSettings()), Ccr.CCR_THREAD_POOL_NAME); // TODO: There should be some local timeout. And if the remote cluster returns an unknown session // response, we should be able to retry by creating a new session. 
- try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, indexShard, recoveryState)) { - restoreSession.restoreFiles(); - updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index()); + try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, shardId, recoveryState)) { + restoreSession.restoreFiles(store); + updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, shardId.getIndex()); } catch (Exception e) { - throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e); } finally { - logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, retentionLeaseId); + logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, + retentionLeaseId); renewable.cancel(); } } - private void createEmptyStore(final IndexShard indexShard, final ShardId shardId) { - final Store store = indexShard.store(); + private void createEmptyStore(Store store) { store.incRef(); try { - store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); + store.createEmpty(store.indexSettings().getIndexVersionCreated().luceneVersion); } catch (final EngineException | IOException e) { - throw new IndexShardRecoveryException(shardId, "failed to create empty store", e); + throw new IndexShardRecoveryException(store.shardId(), "failed to create empty store", e); } finally { store.decRef(); } @@ -432,12 +431,12 @@ private void updateMappings(Client leaderClient, Index leaderIndex, long leaderM } } - RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard, + RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, ShardId indexShardId, RecoveryState recoveryState) { String sessionUUID = UUIDs.randomBase64UUID(); PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE, new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId)).actionGet(ccrSettings.getRecoveryActionTimeout()); - return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShard, recoveryState, + return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShardId, recoveryState, response.getStoreFileMetaData(), response.getMappingVersion(), threadPool, ccrSettings, throttledTime::inc); } @@ -452,10 +451,10 @@ private static class RestoreSession extends FileRestoreContext implements Closea private final LongConsumer throttleListener; private final ThreadPool threadPool; - RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, IndexShard indexShard, + RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, ShardId shardId, RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, long mappingVersion, ThreadPool threadPool, CcrSettings ccrSettings, LongConsumer throttleListener) { - super(repositoryName, indexShard, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes())); + super(repositoryName, shardId, SNAPSHOT_ID, recoveryState, 
Math.toIntExact(ccrSettings.getChunkSize().getBytes())); this.remoteClient = remoteClient; this.sessionUUID = sessionUUID; this.node = node; @@ -466,14 +465,14 @@ private static class RestoreSession extends FileRestoreContext implements Closea this.throttleListener = throttleListener; } - void restoreFiles() throws IOException { + void restoreFiles(Store store) throws IOException { ArrayList fileInfos = new ArrayList<>(); for (StoreFileMetaData fileMetaData : sourceMetaData) { ByteSizeValue fileSize = new ByteSizeValue(fileMetaData.length()); fileInfos.add(new FileInfo(fileMetaData.name(), fileMetaData, fileSize)); } SnapshotFiles snapshotFiles = new SnapshotFiles(LATEST, fileInfos); - restore(snapshotFiles); + restore(snapshotFiles, store); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index c5a357c7df817..abef313d0b017 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -447,8 +447,8 @@ protected synchronized void recoverPrimary(IndexShard primary) { primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, - IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { try { IndexShard leader = leaderGroup.getPrimary(); Lucene.cleanLuceneIndex(primary.store().directory()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index 947ce78da2ca3..f8260f2fce57c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -127,8 +127,8 @@ public void testRestoreShard() throws IOException { target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, - RecoveryState recoveryState) { + public void restoreShard(Store store, SnapshotId snapshotId, + Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index d7f70cf8ef2e1..bb5819e1bda43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -10,7 
+10,9 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -24,8 +26,7 @@ import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.ReadOnlyEngine; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogStats; @@ -104,15 +105,18 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met } @Override - public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - if (shard.mapperService().documentMapper() != null // if there is no mapping this is null - && shard.mapperService().documentMapper().sourceMapper().isComplete() == false) { + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + if (mapperService.documentMapper() != null // if there is no mapping this is null + && mapperService.documentMapper().sourceMapper().isComplete() == false) { throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + "or filters the source"); } - ShardPath shardPath = shard.shardPath(); - Path dataPath = shardPath.getDataPath(); + Directory unwrap = FilterDirectory.unwrap(store.directory()); + if (unwrap instanceof FSDirectory == false) { + throw new AssertionError("expected FSDirectory but got " + unwrap.toString()); + } + Path dataPath = ((FSDirectory) unwrap).getDirectory().getParent(); // TODO should we have a snapshot tmp directory per shard that is maintained by the system? Path snapPath = dataPath.resolve(SNAPSHOT_DIR_NAME); try (FSDirectory directory = new SimpleFSDirectory(snapPath)) { @@ -122,7 +126,7 @@ protected void closeInternal() { // do nothing; } }, Store.OnClose.EMPTY); - Supplier querySupplier = shard.mapperService().hasNested() ? Queries::newNestedFilter : null; + Supplier querySupplier = mapperService.hasNested() ? 
Queries::newNestedFilter : null; // SourceOnlySnapshot will take care of soft- and hard-deletes no special casing needed here SourceOnlySnapshot snapshot = new SourceOnlySnapshot(tempStore.directory(), querySupplier); snapshot.syncSnapshot(snapshotIndexCommit); @@ -133,7 +137,7 @@ protected void closeInternal() { store.incRef(); try (DirectoryReader reader = DirectoryReader.open(tempStore.directory())) { IndexCommit indexCommit = reader.getIndexCommit(); - super.snapshotShard(shard, tempStore, snapshotId, indexId, indexCommit, snapshotStatus); + super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus); } finally { store.decRef(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 6a37e8265c096..948503b33478c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -98,7 +98,7 @@ public void testSourceIncomplete() throws IOException { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> runAsSnapshot(shard.getThreadPool(), - () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, + () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus))); assertEquals("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source" , illegalStateException.getMessage()); @@ -120,8 +120,8 @@ public void testIncrementalSnapshot() throws IOException { try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); SnapshotId snapshotId = new SnapshotId("test", "test"); - runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef - .getIndexCommit(), indexShardSnapshotStatus)); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); totalFileCount = copy.getTotalFileCount(); @@ -134,8 +134,8 @@ public void testIncrementalSnapshot() throws IOException { SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); - runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef - .getIndexCommit(), indexShardSnapshotStatus)); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1.si, _1.fdx, _1.fnm, _1.fdt assertEquals(5, copy.getIncrementalFileCount()); @@ -148,8 +148,8 @@ public void testIncrementalSnapshot() throws IOException { 
SnapshotId snapshotId = new SnapshotId("test_2", "test_2"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); - runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef - .getIndexCommit(), indexShardSnapshotStatus)); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1_1.liv assertEquals(2, copy.getIncrementalFileCount()); @@ -197,7 +197,8 @@ public void testRestoreMinmal() throws IOException { repository.initializeSnapshot(snapshotId, Arrays.asList(indexId), MetaData.builder().put(shard.indexSettings() .getIndexMetaData(), false).build()); - repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus); + repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(), + indexShardSnapshotStatus); }); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); From 8918dd1f8641f04b16433b7e1fa035bf713b2a26 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 22 May 2019 13:20:18 +0300 Subject: [PATCH 190/321] Fail early when rp.client_secret is missing in OIDC realm (#42256) rp.client_secret is a required secure setting. Make sure we fail with a SettingsException and a clear, actionable message when building the realm, if the setting is missing. --- .../authc/oidc/OpenIdConnectRealm.java | 4 +++ .../authc/SecurityRealmSettingsTests.java | 8 ++++- .../oidc/OpenIdConnectRealmSettingsTests.java | 36 +++++++++++++++++++ .../authc/oidc/OpenIdConnectRealmTests.java | 18 +++++++--- .../authc/oidc/OpenIdConnectTestCase.java | 11 +++++- 5 files changed, 70 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java index 5f876a677d689..ac933dcfef878 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java @@ -247,6 +247,10 @@ private RelyingPartyConfiguration buildRelyingPartyConfiguration(RealmConfig con } final ClientID clientId = new ClientID(require(config, RP_CLIENT_ID)); final SecureString clientSecret = config.getSetting(RP_CLIENT_SECRET); + if (clientSecret.length() == 0) { + throw new SettingsException("The configuration setting [" + RealmSettings.getFullSettingKey(config, RP_CLIENT_SECRET) + + "] is required"); + } final ResponseType responseType; try { // This should never happen as it's already validated in the settings diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java index bccee36631e3d..b9a557320e3e1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; @@ -52,8 +53,12 @@ protected Settings nodeSettings(int nodeOrdinal) { final Path jwkSet = createTempFile("jwkset", "json"); OpenIdConnectTestCase.writeJwkSetToFile(jwkSet); + final Settings existingSettings = super.nodeSettings(nodeOrdinal); + MockSecureSettings mockSecureSettings = + (MockSecureSettings) Settings.builder().put(existingSettings).getSecureSettings(); + mockSecureSettings.setString("xpack.security.authc.realms.oidc.oidc1.rp.client_secret", randomAlphaOfLength(12)); settings = Settings.builder() - .put(super.nodeSettings(nodeOrdinal).filter(s -> s.startsWith("xpack.security.authc.realms.") == false)) + .put(existingSettings.filter(s -> s.startsWith("xpack.security.authc.realms.") == false), false) .put("xpack.security.authc.token.enabled", true) .put("xpack.security.authc.realms.file.file1.order", 1) .put("xpack.security.authc.realms.native.native1.order", 2) @@ -80,6 +85,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put("xpack.security.authc.realms.oidc.oidc1.rp.client_id", "my_client") .put("xpack.security.authc.realms.oidc.oidc1.rp.response_type", "code") .put("xpack.security.authc.realms.oidc.oidc1.claims.principal", "sub") + .setSecureSettings(mockSecureSettings) .build(); } catch (IOException e) { throw new RuntimeException(e); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java index 8dbf27070c492..341cf07b0dd7b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.authc.oidc; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -42,6 +43,7 @@ public void testIncorrectResponseTypeThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "hybrid"); + settingsBuilder.setSecureSettings(getSecureSettings()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -58,6 +60,7 @@ public void testMissingAuthorizationEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + 
settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -75,6 +78,7 @@ public void testInvalidAuthorizationEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -91,6 +95,7 @@ public void testMissingTokenEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -108,6 +113,7 @@ public void testInvalidTokenEndpointThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -123,6 +129,7 @@ public void testMissingJwksUrlThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -139,6 +146,7 @@ public void testMissingIssuerThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -155,6 +163,7 @@ public void testMissingRedirectUriThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new 
OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -171,6 +180,7 @@ public void testMissingClientIdThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -189,6 +199,7 @@ public void testMissingPrincipalClaimThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), Arrays.asList("openid", "scope1", "scope2")); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -209,6 +220,7 @@ public void testPatternWithoutSettingThrowsError() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), Arrays.asList("openid", "scope1", "scope2")); + settingsBuilder.setSecureSettings(getSecureSettings()); SettingsException exception = expectThrows(SettingsException.class, () -> { new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); }); @@ -218,6 +230,30 @@ public void testPatternWithoutSettingThrowsError() { Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getPattern()))); } + public void testMissingClientSecretThrowsError() { + final Settings.Builder settingsBuilder = Settings.builder() + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + SettingsException exception = expectThrows(SettingsException.class, () -> { + new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); + }); + assertThat(exception.getMessage(), + Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET))); + } + + private MockSecureSettings getSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET), + randomAlphaOfLengthBetween(12, 18)); + return secureSettings; + } + private RealmConfig buildConfig(Settings realmSettings) { final Settings settings = Settings.builder() .put("path.home", createTempDir()) diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java index 151a7e1caea19..162b88224414e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java @@ -165,7 +165,8 @@ public void testBuildRelyingPartyConfigWithoutOpenIdScope() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), - Arrays.asList("scope1", "scope2")); + Arrays.asList("scope1", "scope2")) + .setSecureSettings(getSecureSettings()); final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null); @@ -187,7 +188,8 @@ public void testBuildingAuthenticationRequest() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") .putList(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REQUESTED_SCOPES), - Arrays.asList("openid", "scope1", "scope2")); + Arrays.asList("openid", "scope1", "scope2")) + .setSecureSettings(getSecureSettings()); final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null); @@ -207,7 +209,9 @@ public void testBuilidingAuthenticationRequestWithDefaultScope() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") + .setSecureSettings(getSecureSettings()); + ; final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final OpenIdConnectPrepareAuthenticationResponse response = realm.buildAuthenticationRequestUri(null, null, null); @@ -237,7 +241,9 @@ public void testBuildingAuthenticationRequestWithExistingStateAndNonce() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") + .setSecureSettings(getSecureSettings()); + ; final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final String state = new State().getValue(); @@ -257,7 +263,9 @@ public 
void testBuildingAuthenticationRequestWithLoginHint() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code") + .setSecureSettings(getSecureSettings()); + ; final OpenIdConnectRealm realm = new OpenIdConnectRealm(buildConfig(settingsBuilder.build(), threadContext), null, null); final String state = new State().getValue(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java index 9c1c4e981109a..63071a3d1cb40 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java @@ -12,6 +12,7 @@ import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; import com.nimbusds.openid.connect.sdk.Nonce; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -50,7 +51,15 @@ protected static Settings.Builder getBasicRealmSettings() { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.GROUPS_CLAIM.getClaim()), "groups") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.MAIL_CLAIM.getClaim()), "mail") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name"); + .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getClaim()), "name") + .setSecureSettings(getSecureSettings()); + } + + protected static MockSecureSettings getSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_SECRET), + randomAlphaOfLengthBetween(12, 18)); + return secureSettings; } protected JWT generateIdToken(String subject, String audience, String issuer) throws Exception { From 3b67d87bf6d6b23694fadbcb8ab8b0d83ac3905d Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 12:25:48 +0200 Subject: [PATCH 191/321] Avoid bubbling up failures from a shard that is recovering (#42287) A shard that is undergoing peer recovery is subject to logging warnings of the form org.elasticsearch.action.FailedNodeException: Failed node [XYZ] ... Caused by: org.apache.lucene.index.IndexNotFoundException: no segments* file found in ... These failures are actually harmless, and expected to happen while a peer recovery is ongoing (i.e. there is an IndexShard instance, but no proper IndexCommit just yet). As these failures are currently bubbled up to the master, they cause unnecessary reroutes and confusion amongst users due to being logged as warnings. 
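The fix, shown in the diff below, is to distinguish the expected no-commit-yet case from real trouble: an IndexNotFoundException thrown by snapshotStoreMetadata() while recovery is still copying segments is answered with empty store metadata instead of being propagated as a node-level failure. In outline:

    try {
        return new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata());
    } catch (org.apache.lucene.index.IndexNotFoundException e) {
        // expected while peer recovery is ongoing; answer "no files on this node"
        return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY);
    }
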
Closes #40107 --- .../TransportNodesListShardStoreMetaData.java | 14 +++++- .../indices/recovery/IndexRecoveryIT.java | 44 +++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index bc041b4b322ae..20307af32f4ed 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.store; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -123,8 +124,17 @@ private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException if (indexService != null) { IndexShard indexShard = indexService.getShardOrNull(shardId.id()); if (indexShard != null) { - exists = true; - return new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata()); + try { + final StoreFilesMetaData storeFilesMetaData = new StoreFilesMetaData(shardId, indexShard.snapshotStoreMetadata()); + exists = true; + return storeFilesMetaData; + } catch (org.apache.lucene.index.IndexNotFoundException e) { + logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e); + return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY); + } catch (IOException e) { + logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e); + return new StoreFilesMetaData(shardId, Store.MetadataSnapshot.EMPTY); + } } } // try and see if we an list unallocated diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 0ea8eb8e9b447..4710c59647c25 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -923,6 +923,50 @@ public void testDoNotInfinitelyWaitForMapping() { assertHitCount(client().prepareSearch().get(), numDocs); } + /** Makes sure the new master does not repeatedly fetch index metadata from recovering replicas */ + public void testOngoingRecoveryAndMasterFailOver() throws Exception { + String indexName = "test"; + internalCluster().startNodes(2); + String nodeWithPrimary = internalCluster().startDataOnlyNode(); + assertAcked(client().admin().indices().prepareCreate(indexName) + .setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", nodeWithPrimary))); + MockTransportService transport = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary); + CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); + CountDownLatch allowToCompletePhase1Latch = new CountDownLatch(1); + Semaphore blockRecovery = new Semaphore(1); + transport.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.CLEAN_FILES.equals(action) && blockRecovery.tryAcquire()) { + phase1ReadyBlocked.countDown(); + try { + 
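// The send-behavior above intercepts the first CLEAN_FILES recovery request:
// it signals the waiting test thread via phase1ReadyBlocked, then parks the
// recovery right here until the finally block at the end of the test counts
// down allowToCompletePhase1Latch.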
allowToCompletePhase1Latch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } + connection.sendRequest(requestId, action, request, options); + }); + try { + String nodeWithReplica = internalCluster().startDataOnlyNode(); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", nodeWithPrimary + "," + nodeWithReplica))); + phase1ReadyBlocked.await(); + internalCluster().restartNode(clusterService().state().nodes().getMasterNode().getName(), + new InternalTestCluster.RestartCallback()); + internalCluster().ensureAtLeastNumDataNodes(3); + assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) + .putNull("index.routing.allocation.include._name"))); + assertFalse(client().admin().cluster().prepareHealth(indexName).setWaitForActiveShards(2).get().isTimedOut()); + } finally { + allowToCompletePhase1Latch.countDown(); + } + ensureGreen(indexName); + } + public void testRecoveryFlushReplica() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); String indexName = "test-index"; From 7ab59eef11f5f966ae3cad385237a4f8b7ad115f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 12:55:47 +0200 Subject: [PATCH 192/321] Some Cleanup in o.e.i.engine (#42278) * Some Cleanup in o.e.i.engine * Remove dead code and parameters * Reduce visibility in some obvious spots * Add missing `assert`s (not that important here since the methods themselves will probably be dead-code eliminated) but still --- .../elasticsearch/index/engine/Engine.java | 10 +-- .../index/engine/InternalEngine.java | 35 +++++----- .../index/engine/LiveVersionMap.java | 2 +- .../index/engine/LuceneChangesSnapshot.java | 3 +- .../index/engine/ReadOnlyEngine.java | 4 +- .../index/engine/RecoveryCounter.java | 65 ------------------- .../RecoverySourcePruneMergePolicy.java | 3 +- .../elasticsearch/index/engine/Segment.java | 18 ++--- .../index/engine/SegmentsStats.java | 25 ++++--- .../engine/SnapshotFailedEngineException.java | 7 +- .../index/engine/TranslogLeafReader.java | 5 +- .../VersionConflictEngineException.java | 6 +- .../index/engine/FrozenEngine.java | 2 +- 13 files changed, 47 insertions(+), 138 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 63659126f8438..2d210b716d4b7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -911,7 +911,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment map.put(extension, length); } - if (useCompoundFile && directory != null) { + if (useCompoundFile) { try { directory.close(); } catch (IOException e) { @@ -954,8 +954,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole // now, correlate or add the committed ones... 
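One cleanup pattern in this patch is worth spelling out: helper methods that exist only to check invariants return boolean so their call sites can sit under the assert keyword (see the InternalEngine hunks below), meaning neither the check nor even the call happens when assertions are disabled. The shape, with illustrative names (the real helpers are assertPrimaryCanOptimizeAddDocument and assertNonPrimaryOrigin):

    private boolean assertPrimaryOrigin(Index index) {
        assert index.origin() == Operation.Origin.PRIMARY : "unexpected origin " + index.origin();
        return true;  // constant true lets callers wrap the call in an assert
    }

    private void plan(Index index) {
        assert assertPrimaryOrigin(index);  // not even invoked when -ea is off
        // ... planning logic ...
    }
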
if (lastCommittedSegmentInfos != null) { - SegmentInfos infos = lastCommittedSegmentInfos; - for (SegmentCommitInfo info : infos) { + for (SegmentCommitInfo info : lastCommittedSegmentInfos) { Segment segment = segments.get(info.info.name); if (segment == null) { segment = new Segment(info.info.name); @@ -1783,11 +1782,8 @@ public boolean equals(Object o) { CommitId commitId = (CommitId) o; - if (!Arrays.equals(id, commitId.id)) { - return false; - } + return Arrays.equals(id, commitId.id); - return true; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 24d1078510c0b..9fb63d0de019d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -560,7 +560,7 @@ private String loadTranslogUUIDFromLastCommit() throws IOException { /** * Reads the current stored history ID from the IW commit data. */ - private String loadHistoryUUID(final IndexWriter writer) throws IOException { + private String loadHistoryUUID(final IndexWriter writer) { final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); if (uuid == null) { throw new IllegalStateException("commit doesn't contain history uuid"); @@ -632,9 +632,8 @@ public GetResult get(Get get, BiFunction search if (operation != null) { // in the case of a already pruned translog generation we might get null here - yet very unlikely final Translog.Index index = (Translog.Index) operation; - TranslogLeafReader reader = new TranslogLeafReader(index, engineConfig - .getIndexSettings().getIndexVersionCreated()); - return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close), + TranslogLeafReader reader = new TranslogLeafReader(index); + return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader), new VersionsAndSeqNoResolver.DocIdAndVersion(0, index.version(), index.seqNo(), index.primaryTerm(), reader, 0)); } @@ -753,7 +752,7 @@ private boolean canOptimizeAddDocument(Index index) { + index.getAutoGeneratedIdTimestamp(); switch (index.origin()) { case PRIMARY: - assertPrimaryCanOptimizeAddDocument(index); + assert assertPrimaryCanOptimizeAddDocument(index); return true; case PEER_RECOVERY: case REPLICA: @@ -779,7 +778,7 @@ protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) { private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { if (origin == Operation.Origin.PRIMARY) { - assertPrimaryIncomingSequenceNumber(origin, seqNo); + assert assertPrimaryIncomingSequenceNumber(origin, seqNo); } else { // sequence number should be set when operation origin is not primary assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin; @@ -920,7 +919,7 @@ public IndexResult index(Index index) throws IOException { } protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { - assertNonPrimaryOrigin(index); + assert assertNonPrimaryOrigin(index); final IndexingStrategy plan; final boolean appendOnlyRequest = canOptimizeAddDocument(index); if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) { @@ -975,13 +974,13 @@ protected IndexingStrategy indexingStrategyForOperation(final Index index) throw } } - protected final IndexingStrategy planIndexingAsPrimary(Index index) throws 
IOException { + private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin(); final IndexingStrategy plan; // resolve an external operation into an internal one which is safe to replay if (canOptimizeAddDocument(index)) { if (mayHaveBeenIndexedBefore(index)) { - plan = IndexingStrategy.overrideExistingAsIfNotThere(1L); + plan = IndexingStrategy.overrideExistingAsIfNotThere(); versionMap.enforceSafeAccess(); } else { plan = IndexingStrategy.optimizedAppendOnly(1L); @@ -1003,7 +1002,7 @@ protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOExc if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); - plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); + plan = IndexingStrategy.skipDueToVersionConflict(e, true, currentVersion, getPrimaryTerm()); } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm() )) { @@ -1161,9 +1160,9 @@ static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, true, false, versionForIndexing, null); } - static IndexingStrategy overrideExistingAsIfNotThere(long versionForIndexing) { + static IndexingStrategy overrideExistingAsIfNotThere() { return new IndexingStrategy(true, true, true, - false, versionForIndexing, null); + false, 1L, null); } public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long versionForIndexing) { @@ -1282,7 +1281,7 @@ protected DeletionStrategy deletionStrategyForOperation(final Delete delete) thr } protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException { - assertNonPrimaryOrigin(delete); + assert assertNonPrimaryOrigin(delete); maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr)); assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]"; @@ -1302,7 +1301,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws } else { final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.version()); + plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version()); } else { plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version()); } @@ -1315,7 +1314,7 @@ protected boolean assertNonPrimaryOrigin(final Operation operation) { return true; } - protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { + private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin(); // resolve operation from external to internal final VersionValue versionValue = resolveDocVersion(delete, delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO); @@ 
-1333,7 +1332,7 @@ protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOE if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); - plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); + plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), true); } else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm() )) { @@ -1425,8 +1424,8 @@ public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, lo return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, null); } - static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, long versionOfDeletion) { - return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, versionOfDeletion, null); + static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) { + return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, null); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index e4dce8919cf1e..ce955903af494 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -234,7 +234,7 @@ long getMinDeleteTimestamp() { /** * Tracks bytes used by tombstones (deletes) */ - final AtomicLong ramBytesUsedTombstones = new AtomicLong(); + private final AtomicLong ramBytesUsedTombstones = new AtomicLong(); @Override public void beforeRefresh() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index c9550a61f9e58..a3e86ab1606df 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -188,8 +188,7 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray int readerIndex = 0; CombinedDocValues combinedDocValues = null; LeafReaderContext leaf = null; - for (int i = 0; i < scoreDocs.length; i++) { - ScoreDoc scoreDoc = scoreDocs[i]; + for (ScoreDoc scoreDoc : scoreDocs) { if (scoreDoc.doc >= docBase + maxDoc) { do { leaf = leaves.get(readerIndex++); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index e7e0c4d927851..9d5f6054243e4 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -457,8 +457,8 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { } - protected void processReaders(IndexReader reader, IndexReader previousReader) { - searcherFactory.processReaders(reader, previousReader); + protected void processReader(IndexReader reader) { + searcherFactory.processReaders(reader, null); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java b/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java deleted file mode 100644 index 31fddbedfb715..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.engine; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.index.store.Store; - -import java.util.concurrent.atomic.AtomicInteger; - -/** - * RecoveryCounter keeps tracks of the number of ongoing recoveries for a - * particular {@link Store} - */ -public class RecoveryCounter implements Releasable { - - private final Store store; - - RecoveryCounter(Store store) { - this.store = store; - } - - private final AtomicInteger onGoingRecoveries = new AtomicInteger(); - - void startRecovery() { - store.incRef(); - onGoingRecoveries.incrementAndGet(); - } - - public int get() { - return onGoingRecoveries.get(); - } - - /** - * End the recovery counter by decrementing the store's ref and the ongoing recovery counter - * @return number of ongoing recoveries remaining - */ - int endRecovery() { - store.decRef(); - int left = onGoingRecoveries.decrementAndGet(); - assert onGoingRecoveries.get() >= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get(); - return left; - } - - @Override - public void close() { - endRecovery(); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index 42276f4ca2108..a4221bf01f210 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -58,8 +58,7 @@ public CodecReader wrapForMerge(CodecReader reader) throws IOException { }); } - // pkg private for testing - static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier) + private static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier) throws IOException { NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField); if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java index 945359eda1b17..b1e6d09d897f2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java @@ 
-39,6 +39,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; public class Segment implements Streamable { @@ -93,10 +94,6 @@ public ByteSizeValue getSize() { return new ByteSizeValue(sizeInBytes); } - public long getSizeInBytes() { - return this.sizeInBytes; - } - public org.apache.lucene.util.Version getVersion() { return version; } @@ -144,9 +141,8 @@ public boolean equals(Object o) { Segment segment = (Segment) o; - if (name != null ? !name.equals(segment.name) : segment.name != null) return false; + return Objects.equals(name, segment.name); - return true; } @Override @@ -211,7 +207,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - Sort readSegmentSort(StreamInput in) throws IOException { + private Sort readSegmentSort(StreamInput in) throws IOException { int size = in.readVInt(); if (size == 0) { return null; @@ -262,7 +258,7 @@ Sort readSegmentSort(StreamInput in) throws IOException { return new Sort(fields); } - void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { + private void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { if (sort == null) { out.writeVInt(0); return; @@ -302,14 +298,14 @@ void writeSegmentSort(StreamOutput out, Sort sort) throws IOException { } } - Accountable readRamTree(StreamInput in) throws IOException { + private Accountable readRamTree(StreamInput in) throws IOException { final String name = in.readString(); final long bytes = in.readVLong(); int numChildren = in.readVInt(); if (numChildren == 0) { return Accountables.namedAccountable(name, bytes); } - List children = new ArrayList(numChildren); + List children = new ArrayList<>(numChildren); while (numChildren-- > 0) { children.add(readRamTree(in)); } @@ -317,7 +313,7 @@ Accountable readRamTree(StreamInput in) throws IOException { } // the ram tree is written recursively since the depth is fairly low (5 or 6) - void writeRamTree(StreamOutput out, Accountable tree) throws IOException { + private void writeRamTree(StreamOutput out, Accountable tree) throws IOException { out.writeString(tree.toString()); out.writeVLong(tree.ramBytesUsed()); Collection children = tree.getChildResources(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 2d22a6f3caf20..ae78de574531f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Iterator; public class SegmentsStats implements Streamable, Writeable, ToXContentFragment { @@ -54,7 +53,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment * Ideally this should be in sync to what the current version of Lucene is using, but it's harmless to leave extensions out, * they'll just miss a proper description in the stats */ - private static ImmutableOpenMap fileDescriptions = ImmutableOpenMap.builder() + private static final ImmutableOpenMap FILE_DESCRIPTIONS = ImmutableOpenMap.builder() .fPut("si", "Segment Info") .fPut("fnm", "Fields") .fPut("fdx", "Field Index") @@ -150,8 +149,7 @@ public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) { public void addFileSizes(ImmutableOpenMap fileSizes) { ImmutableOpenMap.Builder map = ImmutableOpenMap.builder(this.fileSizes); - for (Iterator> it = 
fileSizes.iterator(); it.hasNext();) { - ObjectObjectCursor entry = it.next(); + for (ObjectObjectCursor entry : fileSizes) { if (map.containsKey(entry.key)) { Long oldValue = map.get(entry.key); map.put(entry.key, oldValue + entry.value); @@ -206,7 +204,7 @@ public long getTermsMemoryInBytes() { return this.termsMemoryInBytes; } - public ByteSizeValue getTermsMemory() { + private ByteSizeValue getTermsMemory() { return new ByteSizeValue(termsMemoryInBytes); } @@ -217,7 +215,7 @@ public long getStoredFieldsMemoryInBytes() { return this.storedFieldsMemoryInBytes; } - public ByteSizeValue getStoredFieldsMemory() { + private ByteSizeValue getStoredFieldsMemory() { return new ByteSizeValue(storedFieldsMemoryInBytes); } @@ -228,7 +226,7 @@ public long getTermVectorsMemoryInBytes() { return this.termVectorsMemoryInBytes; } - public ByteSizeValue getTermVectorsMemory() { + private ByteSizeValue getTermVectorsMemory() { return new ByteSizeValue(termVectorsMemoryInBytes); } @@ -239,7 +237,7 @@ public long getNormsMemoryInBytes() { return this.normsMemoryInBytes; } - public ByteSizeValue getNormsMemory() { + private ByteSizeValue getNormsMemory() { return new ByteSizeValue(normsMemoryInBytes); } @@ -250,7 +248,7 @@ public long getPointsMemoryInBytes() { return this.pointsMemoryInBytes; } - public ByteSizeValue getPointsMemory() { + private ByteSizeValue getPointsMemory() { return new ByteSizeValue(pointsMemoryInBytes); } @@ -261,7 +259,7 @@ public long getDocValuesMemoryInBytes() { return this.docValuesMemoryInBytes; } - public ByteSizeValue getDocValuesMemory() { + private ByteSizeValue getDocValuesMemory() { return new ByteSizeValue(docValuesMemoryInBytes); } @@ -326,11 +324,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp); builder.startObject(Fields.FILE_SIZES); - for (Iterator> it = fileSizes.iterator(); it.hasNext();) { - ObjectObjectCursor entry = it.next(); + for (ObjectObjectCursor entry : fileSizes) { builder.startObject(entry.key); builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value)); - builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others")); + builder.field(Fields.DESCRIPTION, FILE_DESCRIPTIONS.getOrDefault(entry.key, "Others")); builder.endObject(); } builder.endObject(); @@ -391,7 +388,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(fileSizes.size()); for (ObjectObjectCursor entry : fileSizes) { out.writeString(entry.key); - out.writeLong(entry.value.longValue()); + out.writeLong(entry.value); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java index f669139c07e2a..d858ccb0ab667 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java @@ -20,17 +20,12 @@ package org.elasticsearch.index.engine; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.ShardId; import java.io.IOException; public class SnapshotFailedEngineException extends EngineException { - public SnapshotFailedEngineException(ShardId shardId, Throwable cause) { - super(shardId, "Snapshot failed", 
cause); - } - public SnapshotFailedEngineException(StreamInput in) throws IOException{ super(in); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java index c1f92966196a3..d40e7d04e3ef3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -35,7 +35,6 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -61,11 +60,9 @@ final class TranslogLeafReader extends LeafReader { private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); - private final Version indexVersionCreated; - TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { + TranslogLeafReader(Translog.Index operation) { this.operation = operation; - this.indexVersionCreated = indexVersionCreated; } @Override public CacheHelper getCoreCacheHelper() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 0f6c217409c30..c869e2bc386aa 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -42,11 +42,7 @@ public VersionConflictEngineException(ShardId shardId, String id, } public VersionConflictEngineException(ShardId shardId, String id, String explanation) { - this(shardId, null, id, explanation); - } - - public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) { - this(shardId, "[{}]: version conflict, {}", cause, id, explanation); + this(shardId, "[{}]: version conflict, {}", null, id, explanation); } public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... 
params) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java index e9b57e316cccc..50f1125b275f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -169,7 +169,7 @@ private synchronized DirectoryReader getOrOpenReader() throws IOException { listeners.beforeRefresh(); } reader = DirectoryReader.open(engineConfig.getStore().directory()); - processReaders(reader, null); + processReader(reader); reader = lastOpenedReader = wrapReader(reader, Function.identity()); reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed); for (ReferenceManager.RefreshListener listeners : config ().getInternalRefreshListener()) { From 28aae648feb921727a6690c193f1162ed87c8e38 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 22 May 2019 14:25:54 +0300 Subject: [PATCH 193/321] TestClusters: Convert docs (#42100) * TestClusters: Convert docs --- .../gradle/doc/DocsTestPlugin.groovy | 10 ++-- .../testclusters/ElasticsearchCluster.java | 12 +++-- .../testclusters/ElasticsearchNode.java | 11 +++- .../TestClusterConfiguration.java | 3 ++ .../testclusters/TestClustersPluginIT.java | 28 +++++----- docs/build.gradle | 18 ++++--- docs/reference/cluster/health.asciidoc | 2 +- docs/reference/getting-started.asciidoc | 2 +- x-pack/docs/build.gradle | 54 ++----------------- 9 files changed, 60 insertions(+), 80 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index a0ce24e45c729..805a1b213e859 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.doc +import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.ClusterFormationTasks @@ -32,10 +33,13 @@ public class DocsTestPlugin extends RestTestPlugin { @Override public void apply(Project project) { + project.pluginManager.apply('elasticsearch.testclusters') project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) + String distribution = System.getProperty('tests.distribution', 'default') // The distribution can be configured with -Dtests.distribution on the command line - project.integTestCluster.distribution = System.getProperty('tests.distribution', 'default') + project.testClusters.integTest.distribution = distribution.toUpperCase() + project.testClusters.integTest.nameCustomization = { it.replace("integTest", "node") } // Docs are published separately so no need to assemble project.tasks.assemble.enabled = false Map defaultSubstitutions = [ @@ -46,8 +50,8 @@ public class DocsTestPlugin extends RestTestPlugin { '\\{version\\}': Version.fromString(VersionProperties.elasticsearch).toString(), '\\{version_qualified\\}': VersionProperties.elasticsearch, '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), - '\\{build_flavor\\}' : project.integTestCluster.distribution, - '\\{build_type\\}' : ClusterFormationTasks.getOs().equals("windows") ? 
"zip" : "tar", + '\\{build_flavor\\}' : distribution, + '\\{build_type\\}' : OS.conditionalString().onWindows({"zip"}).onUnix({"tar"}).supply(), ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 0cb7ee0c10fc7..e245fb0ead95a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -42,6 +42,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -70,7 +71,7 @@ public ElasticsearchCluster(String path, String clusterName, Project project, Fi this.nodes = project.container(ElasticsearchNode.class); this.nodes.add( new ElasticsearchNode( - path, clusterName + "-1", + path, clusterName + "-0", services, artifactsExtractDir, workingDirBase ) ); @@ -91,7 +92,7 @@ public void setNumberOfNodes(int numberOfNodes) { ); } - for (int i = nodes.size() + 1 ; i <= numberOfNodes; i++) { + for (int i = nodes.size() ; i < numberOfNodes; i++) { this.nodes.add(new ElasticsearchNode( path, clusterName + "-" + i, services, artifactsExtractDir, workingDirBase )); @@ -99,7 +100,7 @@ public void setNumberOfNodes(int numberOfNodes) { } private ElasticsearchNode getFirstNode() { - return nodes.getAt(clusterName + "-1"); + return nodes.getAt(clusterName + "-0"); } public int getNumberOfNodes() { @@ -276,6 +277,11 @@ public void stop(boolean tailLogs) { nodes.forEach(each -> each.stop(tailLogs)); } + @Override + public void setNameCustomization(Function nameCustomization) { + nodes.all(each -> each.setNameCustomization(nameCustomization)); + } + @Override public boolean isProcessAlive() { return nodes.stream().noneMatch(node -> node.isProcessAlive() == false); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 3bb1fb2ddb6e3..bba94f6c7d173 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -50,6 +50,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -103,6 +104,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private String version; private File javaHome; private volatile Process esProcess; + private Function nameCustomization = Function.identity(); ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) { this.path = path; @@ -123,7 +125,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { } public String getName() { - return name; + return nameCustomization.apply(name); } public String getVersion() { @@ -536,6 +538,11 @@ public synchronized void stop(boolean tailLogs) { esProcess = null; } + @Override + 
public void setNameCustomization(Function nameCustomizer) { + this.nameCustomization = nameCustomizer; + } + private void stopHandle(ProcessHandle processHandle, boolean forcibly) { // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows. if (processHandle.isAlive() == false) { @@ -656,7 +663,7 @@ private void syncWithLinks(Path sourceRoot, Path destinationRoot) { } private void createConfiguration() { - defaultConfig.put("node.name", safeName(name)); + defaultConfig.put("node.name", nameCustomization.apply(safeName(name))); defaultConfig.put("path.repo", confPathRepo.toAbsolutePath().toString()); defaultConfig.put("path.data", confPathData.toAbsolutePath().toString()); defaultConfig.put("path.logs", confPathLogs.toAbsolutePath().toString()); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index 628dadcbb9d37..1ccbeabd4b88a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; @@ -85,6 +86,8 @@ public interface TestClusterConfiguration { void stop(boolean tailLogs); + void setNameCustomization(Function nameSupplier); + default void waitForConditions( LinkedHashMap> waitConditions, long startedAtMillis, diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index c9086d1459afd..39651ff896057 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -86,8 +86,8 @@ public void testUseClusterBySkippedAndWorkingTask() { assertOutputContains( result.getOutput(), "> Task :user1", - "Starting `node{::myTestCluster-1}`", - "Stopping `node{::myTestCluster-1}`" + "Starting `node{::myTestCluster-0}`", + "Stopping `node{::myTestCluster-0}`" ); } @@ -104,22 +104,22 @@ public void testMultiProject() { assertStartedAndStoppedOnce(result); assertOutputOnlyOnce( result.getOutput(), - "Starting `node{:alpha:myTestCluster-1}`", - "Stopping `node{::myTestCluster-1}`" + "Starting `node{:alpha:myTestCluster-0}`", + "Stopping `node{::myTestCluster-0}`" ); assertOutputOnlyOnce( result.getOutput(), - "Starting `node{::myTestCluster-1}`", - "Stopping `node{:bravo:myTestCluster-1}`" + "Starting `node{::myTestCluster-0}`", + "Stopping `node{:bravo:myTestCluster-0}`" ); } public void testReleased() { BuildResult result = getTestClustersRunner("testReleased").build(); assertTaskSuccessful(result, ":testReleased"); - assertStartedAndStoppedOnce(result, "releasedVersionDefault-1"); - assertStartedAndStoppedOnce(result, "releasedVersionOSS-1"); - assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-1"); + assertStartedAndStoppedOnce(result, "releasedVersionDefault-0"); + assertStartedAndStoppedOnce(result, "releasedVersionOSS-0"); + assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-0"); } public void testIncremental() { @@ -143,7 +143,7 @@ public void testUseClusterByFailingOne() { 
assertStartedAndStoppedOnce(result); assertOutputContains( result.getOutput(), - "Stopping `node{::myTestCluster-1}`, tailLogs: true", + "Stopping `node{::myTestCluster-0}`, tailLogs: true", "Execution failed for task ':itAlwaysFails'." ); } @@ -155,7 +155,7 @@ public void testUseClusterByFailingDependency() { assertStartedAndStoppedOnce(result); assertOutputContains( result.getOutput(), - "Stopping `node{::myTestCluster-1}`, tailLogs: true", + "Stopping `node{::myTestCluster-0}`, tailLogs: true", "Execution failed for task ':itAlwaysFails'." ); } @@ -165,7 +165,7 @@ public void testConfigurationLocked() { assertTaskFailed(result, ":illegalConfigAlter"); assertOutputContains( result.getOutput(), - "Configuration for node{::myTestCluster-1} can not be altered, already locked" + "Configuration for node{::myTestCluster-0} can not be altered, already locked" ); } @@ -173,9 +173,9 @@ public void testConfigurationLocked() { public void testMultiNode() { BuildResult result = getTestClustersRunner(":multiNode").build(); assertTaskSuccessful(result, ":multiNode"); + assertStartedAndStoppedOnce(result, "multiNode-0"); assertStartedAndStoppedOnce(result, "multiNode-1"); assertStartedAndStoppedOnce(result, "multiNode-2"); - assertStartedAndStoppedOnce(result, "multiNode-3"); } public void testPluginInstalled() { @@ -211,7 +211,7 @@ private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) { } private void assertStartedAndStoppedOnce(BuildResult result) { - assertStartedAndStoppedOnce(result, "myTestCluster-1"); + assertStartedAndStoppedOnce(result, "myTestCluster-0"); } diff --git a/docs/build.gradle b/docs/build.gradle index 8156d1d54b57a..feda444301ec7 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1,3 +1,5 @@ +import static org.elasticsearch.gradle.Distribution.DEFAULT + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -35,15 +37,15 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/ml/apis/update-snapshot.asciidoc', ] -integTestCluster { - if ("default".equals(integTestCluster.distribution)) { +testClusters.integTest { + if (singleNode().distribution == DEFAULT) { setting 'xpack.license.self_generated.type', 'trial' } // enable regexes in painless so our tests don't complain about example snippets that use them setting 'script.painless.regex.enabled', 'true' Closure configFile = { - extraConfigFile it, "src/test/cluster/config/$it" + extraConfigFile it, file("src/test/cluster/config/$it") } configFile 'analysis/example_word_list.txt' configFile 'analysis/hyphenation_patterns.xml' @@ -52,8 +54,8 @@ integTestCluster { configFile 'userdict_ja.txt' configFile 'userdict_ko.txt' configFile 'KeywordTokenizer.rbbi' - extraConfigFile 'hunspell/en_US/en_US.aff', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff' - extraConfigFile 'hunspell/en_US/en_US.dic', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic' + extraConfigFile 'hunspell/en_US/en_US.aff', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff') + extraConfigFile 'hunspell/en_US/en_US.dic', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic') // Whitelist reindexing from the local node so we can test it. 
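As an aside on the conversion in this commit: the nameCustomization hook that DocsTestPlugin sets earlier is nothing more than a Function<String, String> applied lazily whenever the node name is read. A minimal self-contained Java sketch of that mechanism (the Node class and its fields are illustrative stand-ins, not the real ElasticsearchNode):

    import java.util.function.Function;

    class Node {
        private final String name;
        // Default: leave the generated name untouched.
        private Function<String, String> nameCustomization = Function.identity();

        Node(String name) {
            this.name = name;
        }

        void setNameCustomization(Function<String, String> customization) {
            this.nameCustomization = customization;
        }

        // The customization is applied on read, not stored, so it also affects
        // any configuration (e.g. node.name) written from the name later on.
        String getName() {
            return nameCustomization.apply(name);
        }

        public static void main(String[] args) {
            Node node = new Node("integTest-0");
            node.setNameCustomization(n -> n.replace("integTest", "node"));
            System.out.println(node.getName()); // prints "node-0"
        }
    }

Because the function is applied on read, the customized name also ends up in the node.name setting that createConfiguration() writes, which is what keeps the docs tests' expected node names stable across the conversion.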
setting 'reindex.remote.whitelist', '127.0.0.1:*' } @@ -65,10 +67,12 @@ project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { if (subproj.path.startsWith(':plugins:repository-')) { return } + // FIXME subproj.afterEvaluate { // need to wait until the project has been configured - integTestCluster { - plugin subproj.path + testClusters.integTest { + plugin file(subproj.bundlePlugin.archiveFile) } + tasks.integTest.dependsOn subproj.bundlePlugin } } diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 1e33455d02613..d75ce77d1af80 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -34,7 +34,7 @@ Returns this: "active_shards_percent_as_number": 50.0 } -------------------------------------------------- -// TESTRESPONSE[s/testcluster/docs_integTestCluster/] +// TESTRESPONSE[s/testcluster/integTest/] // TESTRESPONSE[s/"number_of_pending_tasks" : 0,/"number_of_pending_tasks" : $body.number_of_pending_tasks,/] // TESTRESPONSE[s/"task_max_waiting_in_queue_millis": 0/"task_max_waiting_in_queue_millis": $body.task_max_waiting_in_queue_millis/] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b81d2b284371d..7df9bdfe7aa6c 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -301,7 +301,7 @@ And the response: epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent 1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0% -------------------------------------------------- -// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/] +// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ integTest/] // TESTRESPONSE[s/0 0 -/0 \\d+ -/] // TESTRESPONSE[_cat] diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 0075b4989e69f..0a23bb9c9cf62 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -27,54 +27,14 @@ dependencies { testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') } -Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - // wait up to twenty seconds - final long stopTime = System.currentTimeMillis() + 20000L; - Exception lastException = null; - while (System.currentTimeMillis() < stopTime) { - lastException = null; - // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned - HttpURLConnection httpURLConnection = null; - try { - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health").openConnection(); - httpURLConnection.setRequestProperty("Authorization", "Basic " + - Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.setConnectTimeout(1000); - httpURLConnection.setReadTimeout(30000); - httpURLConnection.connect(); - if (httpURLConnection.getResponseCode() == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - break; - } - } catch (Exception e) { - logger.debug("failed to call cluster health", e) - lastException = e - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); 
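The waitWithAuth closure being deleted here reduces to a generic pattern: poll an authenticated HTTP endpoint until it returns 200 or a deadline passes (the testclusters plugin takes care of readiness itself once a user is declared, as the replacement below shows). A standalone Java sketch of the same pattern, with the endpoint and credentials taken from the deleted Groovy but the class and method names invented for illustration:

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    final class ClusterHealthWaiter {
        // Polls the cluster-health endpoint with Basic auth until it answers 200 or the deadline passes.
        static boolean waitForHealth(String httpUri) throws InterruptedException {
            String credentials = Base64.getEncoder()
                .encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8));
            long deadline = System.currentTimeMillis() + 20_000L; // wait up to twenty seconds
            while (System.currentTimeMillis() < deadline) {
                HttpURLConnection connection = null;
                try {
                    connection = (HttpURLConnection) new URL("http://" + httpUri + "/_cluster/health").openConnection();
                    connection.setRequestProperty("Authorization", "Basic " + credentials);
                    connection.setConnectTimeout(1000);
                    if (connection.getResponseCode() == 200) {
                        return true;
                    }
                } catch (IOException e) {
                    // the elastic user is not available immediately, so early 401s and refusals are expected
                } finally {
                    if (connection != null) {
                        connection.disconnect();
                    }
                }
                Thread.sleep(500L); // did not start, so wait a bit before trying again
            }
            return false;
        }
    }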
- } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - if (tmpFile.exists() == false && lastException != null) { - logger.error("final attempt of calling cluster health failed", lastException) - } - return tmpFile.exists() -} - // copy xpack rest api File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') project.copyRestSpec.from(xpackResources) { include 'rest-api-spec/api/**' } -File jwks = new File(xpackProject('test:idp-fixture').projectDir, 'oidc/op-jwks.json') -integTestCluster { + +testClusters.integTest { + extraConfigFile 'op-jwks.json', xpackProject('test:idp-fixture').file("oidc/op-jwks.json") setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' @@ -91,17 +51,13 @@ integTestCluster { setting 'xpack.security.authc.realms.oidc.oidc1.op.jwkset_path', 'op-jwks.json' setting 'xpack.security.authc.realms.oidc.oidc1.rp.redirect_uri', 'https://my.fantastic.rp/cb' setting 'xpack.security.authc.realms.oidc.oidc1.rp.client_id', 'elasticsearch-rp' - keystoreSetting 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' + keystore 'xpack.security.authc.realms.oidc.oidc1.rp.client_secret', 'b07efb7a1cf6ec9462afe7b6d3ab55c6c7880262aa61ac28dded292aca47c9a2' setting 'xpack.security.authc.realms.oidc.oidc1.rp.response_type', 'id_token' setting 'xpack.security.authc.realms.oidc.oidc1.claims.principal', 'sub' - setupCommand 'setupTestAdmin', - 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - extraConfigFile 'op-jwks.json', jwks + user username: 'test_admin' } - buildRestTests.docs = fileTree(projectDir) { // No snippets in here! 
exclude 'build.gradle' From 385dfd95d6d149b9d9ca117768fe48a1dec1f7b6 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 22 May 2019 08:10:10 -0400 Subject: [PATCH 194/321] Update version skips and constants after backport (#42290) After https://github.com/elastic/elasticsearch/pull/41906 was backported, we need to update the various test skips and version constants --- .../test/search.aggregation/230_composite.yml | 4 ++-- .../test/search.aggregation/250_moving_fn.yml | 4 ++-- .../test/search.aggregation/80_typed_keys.yml | 2 +- .../rest-api-spec/test/search/240_date_nanos.yml | 2 +- .../bucket/histogram/DateIntervalWrapper.java | 4 ++-- .../xpack/restart/FullClusterRestartIT.java | 2 +- .../upgrades/RollupDateHistoUpgradeIT.java | 2 +- .../test/mixed_cluster/40_ml_datafeed_crud.yml | 6 +----- .../test/old_cluster/40_ml_datafeed_crud.yml | 13 ++++--------- .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ---- 10 files changed, 15 insertions(+), 28 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 4003d29abb5bf..fc0710fdb5375 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -241,7 +241,7 @@ setup: --- "Composite aggregation with format": - skip: - version: " - 7.99.99" #TODO change this after backport + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 features: warnings @@ -307,7 +307,7 @@ setup: --- "Composite aggregation with format and calendar_interval": - skip: - version: " - 7.99.99" #TODO change this after backport + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index a4517d46d2c62..cd24da7bd616b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -2,7 +2,7 @@ "Bad window": - skip: - version: " - 7.99.0" #TODO change this after backport + version: " - 7.1.99" reason: "calendar_interval added in 7.2" - do: @@ -30,7 +30,7 @@ "Bad window deprecated interval": - skip: - version: " - 7.99.0" #TODO change this after backport + version: " - 7.1.99" reason: "interval deprecation added in 7.2" features: "warnings" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 023c08f3b2d50..d041432556430 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -206,7 +206,7 @@ setup: --- "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": - skip: - version: " - 7.99.0" #TODO change this after backport + version: " - 7.1.99" reason: "calendar_interval added in 7.2" - do: search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index 352d5edf6b374..2caf9c7084792 
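The skip bounds in these YAML tests mirror the wire-level rule applied in the DateIntervalWrapper hunks that follow: once #41906 is backported, only peers older than 7.2.0 still need the legacy format. A hedged sketch of that version-gate pattern, using simplified stand-in types rather than the real StreamOutput/Version:

    import java.io.DataOutputStream;
    import java.io.IOException;

    // Simplified stand-ins for the real StreamOutput/Version types, just to make the gate concrete.
    final class WireVersion {
        static final WireVersion V_7_2_0 = new WireVersion(7_02_00);
        private final int id;
        WireVersion(int id) { this.id = id; }
        boolean before(WireVersion other) { return id < other.id; }
    }

    final class IntervalWriter {
        // Pre-backport peers only understand the plain millis value; newer peers get the typed interval.
        static void writeInterval(DataOutputStream out, WireVersion peerVersion, long millis, String calendarInterval)
                throws IOException {
            if (peerVersion.before(WireVersion.V_7_2_0)) {
                out.writeLong(millis);          // legacy wire format
            } else {
                out.writeUTF(calendarInterval); // current wire format, e.g. "1M"
            }
        }
    }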
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -123,7 +123,7 @@ setup: --- "date histogram aggregation with date and date_nanos mapping": - skip: - version: " - 7.99.99" #TODO change this after backport + version: " - 7.1.99" reason: calendar_interval introduced in 7.2.0 - do: diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index b08782f1fd37a..229fa0d15bb30 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -113,7 +113,7 @@ public static void declareIntervalFields(Object public DateIntervalWrapper() {} public DateIntervalWrapper(StreamInput in) throws IOException { - if (in.getVersion().before(Version.V_8_0_0)) { // TODO change this after backport + if (in.getVersion().before(Version.V_7_2_0)) { long interval = in.readLong(); DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new); @@ -374,7 +374,7 @@ public boolean isEmpty() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { // TODO change this after backport + if (out.getVersion().before(Version.V_7_2_0)) { if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { out.writeLong(TimeValue.parseTimeValue(dateHistogramInterval.toString(), DateHistogramAggregationBuilder.NAME + ".innerWriteTo").getMillis()); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index f17aab309ba72..a62a23dac70b8 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -229,7 +229,7 @@ public void testRollupAfterRestart() throws Exception { final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test"); String intervalType; - if (getOldClusterVersion().onOrAfter(Version.V_8_0_0)) { // TODO change this after backport + if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) { intervalType = "fixed_interval"; } else { intervalType = "interval"; diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java index 035e29ccf771c..08ad9f09d599c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java @@ -34,7 +34,7 @@ public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase { Version.fromString(System.getProperty("tests.upgrade_from_version")); public void testDateHistoIntervalUpgrade() throws Exception { - assumeTrue("DateHisto interval changed in 7.1", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); + assumeTrue("DateHisto interval changed in 7.2", UPGRADE_FROM_VERSION.before(Version.V_7_2_0)); switch 
(CLUSTER_TYPE) { case OLD: break; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 4d732015d47f4..2ff9b08e9b13f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" - --- "Test old cluster datafeed without aggs": - do: @@ -114,6 +109,7 @@ setup: - do: warnings: - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.put_datafeed: datafeed_id: mixed-cluster-datafeed-with-aggs body: > diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 2a7b56adb9a16..4918dde9ba899 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,3 @@ -setup: - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" - --- "Put job and datafeed without aggs in old cluster": @@ -53,8 +48,8 @@ setup: --- "Put job and datafeed with aggs in old cluster - pre-deprecated interval": - skip: - version: "all" #TODO change this after backport - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258; calendar_interval introduced in 7.2.0" + version: "7.1.99 - " + reason: "calendar_interval introduced in 7.2.0" - do: ml.put_job: @@ -123,8 +118,8 @@ setup: --- "Put job and datafeed with aggs in old cluster - deprecated interval with warning": - skip: - version: " - 7.99.99" #TODO change this after backport - reason: calendar_interval introduced in 7.1.0 + version: " - 7.1.99" + reason: calendar_interval introduced in 7.2.0 features: warnings - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 4b742e10de61f..5dc71ecb0679e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" - - do: cluster.health: wait_for_status: green From 1e9221da84ad0da66de87eb82c95c1255a81a530 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 14:12:25 +0200 Subject: [PATCH 195/321] Remove Obsolete BwC Logic from BlobStoreRepository (#42193) * Remove Obsolete BwC Logic from BlobStoreRepository * We can't restore 1.3.3 files anyway -> no point in doing the dance of computing a hash here * Some other minor+obvious cleanups --- .../blobstore/BlobStoreRepository.java | 43 +------------------ .../blobstore/FileRestoreContext.java | 40 
+---------------- 2 files changed, 3 insertions(+), 80 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 86409ebac7d31..49b551b26b796 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,8 +26,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -954,8 +952,6 @@ protected void finalize(final List snapshots, final Map blobs, final String reason) { final String indexGeneration = Integer.toString(fileListGeneration); - final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration); - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots); try { // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier @@ -998,7 +994,8 @@ protected void finalize(final List snapshots, snapshotId, shardId), e); } } catch (IOException e) { - String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]"; + String message = + "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]"; throw new IndexShardSnapshotFailedException(shardId, message, e); } } @@ -1135,16 +1132,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { List filesInfo = snapshots.findPhysicalIndexFiles(fileName); if (filesInfo != null) { for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - try { - // in 1.3.3 we added additional hashes for .si / segments_N files - // to ensure we don't double the space in the repo since old snapshots - // don't have this hash we try to read that hash from the blob store - // in a bwc compatible way. - maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", - shardId, fileInfo.physicalName(), fileInfo.metadata()), e); - } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository // we will reuse it for this snapshot @@ -1315,32 +1302,6 @@ private void checkAborted() { } } - /** - * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. - * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the - * comparison of the files on a per-segment / per-commit level. 
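The BwC shim being removed here (its body follows, along with a twin removed from FileRestoreContext) existed only to backfill a content hash that pre-1.3.3 snapshots never recorded, by streaming the blob once. A simplified standalone sketch of that backfill idea, using a MessageDigest where the real code built the hash with Store.MetadataSnapshot.hashFile over the file contents:

    import java.io.IOException;
    import java.io.InputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    final class HashBackfill {
        // If the stored metadata has no hash (old snapshot format), compute one by streaming the blob.
        static byte[] hashIfMissing(byte[] existingHash, InputStream blob) throws IOException {
            if (existingHash != null && existingHash.length > 0) {
                return existingHash; // nothing to do: the repository already recorded a hash
            }
            try {
                MessageDigest digest = MessageDigest.getInstance("SHA-1");
                byte[] buffer = new byte[8192];
                int read;
                // we might have multiple parts even though the file is small... make sure we read all of it
                while ((read = blob.read(buffer)) != -1) {
                    digest.update(buffer, 0, read);
                }
                return digest.digest();
            } catch (NoSuchAlgorithmException e) {
                throw new AssertionError("SHA-1 is required on all JVMs", e);
            }
        }
    }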
- */ - private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo, - Store.MetadataSnapshot snapshot) throws Exception { - final StoreFileMetaData metadata; - if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { - if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { - // we have a hash - check if our repo has a hash too otherwise we have - // to calculate it. - // we might have multiple parts even though the file is small... make sure we read all of it. - try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) { - BytesRefBuilder builder = new BytesRefBuilder(); - Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); - BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash - assert hash.length == 0; - hash.bytes = builder.bytes(); - hash.offset = 0; - hash.length = builder.length(); - } - } - } - } - private static final class PartSliceStream extends SlicedInputStream { private final BlobContainer container; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index f78ddab9ee44c..3abe4d7b50722 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -27,8 +27,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.shard.ShardId; @@ -127,17 +125,6 @@ public void restore(SnapshotFiles snapshotFiles, Store store) throws IOException final Map snapshotMetaData = new HashMap<>(); final Map fileInfos = new HashMap<>(); for (final BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) { - try { - // in 1.3.3 we added additional hashes for .si / segments_N files - // to ensure we don't double the space in the repo since old snapshots - // don't have this hash we try to read that hash from the blob store - // in a bwc compatible way. - maybeRecalculateMetadataHash(fileInfo, recoveryTargetMetadata); - } catch (Exception e) { - // if the index is broken we might not be able to read it - logger.warn(new ParameterizedMessage("[{}] Can't calculate hash from blog for file [{}] [{}]", shardId, - fileInfo.physicalName(), fileInfo.metadata()), e); - } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); } @@ -237,7 +224,7 @@ protected void restoreFiles(List filesToRe protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo); @SuppressWarnings("unchecked") - private Iterable concat(Store.RecoveryDiff diff) { + private static Iterable concat(Store.RecoveryDiff diff) { return Iterables.concat(diff.different, diff.missing); } @@ -276,29 +263,4 @@ private void restoreFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, fi } } - /** - * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. 
- * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the - * comparison of the files on a per-segment / per-commit level. - */ - private void maybeRecalculateMetadataHash(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot) - throws IOException { - final StoreFileMetaData metadata; - if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { - if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { - // we have a hash - check if our repo has a hash too otherwise we have - // to calculate it. - // we might have multiple parts even though the file is small... make sure we read all of it. - try (InputStream stream = fileInputStream(fileInfo)) { - BytesRefBuilder builder = new BytesRefBuilder(); - Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); - BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash - assert hash.length == 0; - hash.bytes = builder.bytes(); - hash.offset = 0; - hash.length = builder.length(); - } - } - } - } } From 05809deb490f71a90a4164c302955c5c2ab6d8ac Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 14:21:48 +0200 Subject: [PATCH 196/321] Revert "Mute SpecificMasterNodesIT.testElectOnlyBetweenMasterNodes()" This reverts commit 2964ceaa0371d8bd1665e599c6395a7e7026d094. --- .../java/org/elasticsearch/cluster/SpecificMasterNodesIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index f80a5befa83d9..38b9579eff046 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -86,7 +86,6 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38331") public void testElectOnlyBetweenMasterNodes() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); From d292d95eaada378c216ce00d11e68db79954d359 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 14:36:17 +0200 Subject: [PATCH 197/321] Fix testCannotJoinIfMasterLostDataFolder Relates to #41047 --- .../discovery/ClusterDisruptionIT.java | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 3a257ec5973f8..ad3b8006ed0c3 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.LagDetector; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; @@ -389,7 +390,6 @@ public void onFailure(Exception e) { } } - @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/41047") public void testCannotJoinIfMasterLostDataFolder() throws Exception { String masterNode = internalCluster().startMasterOnlyNode(); String dataNode = internalCluster().startDataOnlyNode(); @@ -402,7 +402,18 @@ public boolean clearData(String nodeName) { @Override public Settings onNodeStopped(String nodeName) { - return Settings.builder().put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName).build(); + return Settings.builder() + .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName) + /* + * the data node might join while the master is still not fully established as master just yet and bypasses the join + * validation that is done before adding the node to the cluster. Only the join validation when handling the publish + * request takes place, but at this point the cluster state has been successfully committed, and will subsequently be + * exposed to the applier. The health check below therefore sees the cluster state with the 2 nodes and thinks all is + * good, even though the data node never accepted this state. What's worse is that it takes 90 seconds for the data + * node to be kicked out of the cluster (lag detection). We speed this up here. + */ + .put(LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.getKey(), "10s") + .build(); } @Override @@ -411,9 +422,11 @@ public boolean validateClusterForming() { } }); - assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut()); - assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get() - .isTimedOut()); + assertBusy(() -> { + assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut()); + assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get() + .isTimedOut()); + }, 30, TimeUnit.SECONDS); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); // otherwise we will fail during clean-up } From 40beecd1e04b81dfbc398e22132bf9411f54c6d5 Mon Sep 17 00:00:00 2001 From: markharwood Date: Wed, 22 May 2019 13:37:47 +0100 Subject: [PATCH 198/321] Search - enable low_level_cancellation by default. (#42291) Benchmarking on worst-case queries (max agg on match_all or popular-term query with large index) was not noticeably slower. Closes #26258 --- .../main/java/org/elasticsearch/search/SearchService.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index b703493b4d505..daf8e1faf7bb8 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -134,11 +134,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv /** * Enables low-level, frequent search cancellation checks. Enabling low-level checks will make long running searches to react - * to the cancellation request faster. However, since it will produce more cancellation checks it might slow the search performance - * down. + * to the cancellation request faster. It will produce more cancellation checks but benchmarking has shown these did not + * noticeably slow down searches. 
*/ public static final Setting LOW_LEVEL_CANCELLATION_SETTING = - Setting.boolSetting("search.low_level_cancellation", false, Property.Dynamic, Property.NodeScope); + Setting.boolSetting("search.low_level_cancellation", true, Property.Dynamic, Property.NodeScope); public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = From b03d7b20928b481ee09418bcc39f1536b40493b0 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 14:45:26 +0200 Subject: [PATCH 199/321] Remove testNodeFailuresAreProcessedOnce This test was not checking the thing it was supposed to anyway. --- .../cluster/coordination/ZenDiscoveryIT.java | 36 ------------------- 1 file changed, 36 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index 9a17c25f44cce..feffbfc792656 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -41,18 +41,14 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.RemoteTransportException; -import java.io.IOException; import java.util.EnumSet; import java.util.Optional; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -97,38 +93,6 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); } - public void testNodeFailuresAreProcessedOnce() throws IOException { - Settings masterNodeSettings = Settings.builder() - .put(Node.NODE_DATA_SETTING.getKey(), false) - .build(); - String master = internalCluster().startNode(masterNodeSettings); - Settings dateNodeSettings = Settings.builder() - .put(Node.NODE_MASTER_SETTING.getKey(), false) - .build(); - internalCluster().startNodes(2, dateNodeSettings); - client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); - - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master); - final AtomicInteger numUpdates = new AtomicInteger(); - final CountDownLatch nodesStopped = new CountDownLatch(1); - clusterService.addStateApplier(event -> { - numUpdates.incrementAndGet(); - try { - // block until both nodes have stopped to accumulate node failures - nodesStopped.await(); - } catch (InterruptedException e) { - //meh - } - }); - - internalCluster().stopRandomNonMasterNode(); - internalCluster().stopRandomNonMasterNode(); - nodesStopped.countDown(); - - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // wait for all to be processed - assertThat(numUpdates.get(), either(equalTo(1)).or(equalTo(2))); // due to batching, both nodes can be handled in same CS update - } - public void testHandleNodeJoin_incompatibleClusterState() throws InterruptedException, ExecutionException, TimeoutException { String masterNode = internalCluster().startMasterOnlyNode(); From 
94848d8a8c27d2f0af4e3da7aa155fce1896d562 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 22 May 2019 15:31:29 +0200 Subject: [PATCH 200/321] Dump Stacktrace on Slow IO-Thread Operations (#42000) * Dump Stacktrace on Slow IO-Thread Operations * Follow-up to #39729, extending the functionality to dump the stack while the thread is still blocked, not afterwards * Logging the stacktrace after the thread has become unblocked is of limited use, because we cannot tell from it what happened in the slow callback (only whether we were blocked on a read, write, connect, etc.) * Relates #41745 --- .../transport/nio/MockNioTransport.java | 71 ++++++++++- .../transport/nio/TestEventHandler.java | 114 ++++++++++-------- .../transport/nio/TestEventHandlerTests.java | 13 +- 3 files changed, 143 insertions(+), 55 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index dc0e14a4d2984..42dae39146605 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.BytesChannelContext; @@ -57,11 +58,16 @@ import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.util.Arrays; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.IntFunction; +import java.util.stream.Collectors; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -70,6 +76,7 @@ public class MockNioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(MockNioTransport.class); private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private final TransportThreadWatchdog transportThreadWatchdog; private volatile NioSelectorGroup nioGroup; private volatile MockTcpChannelFactory clientChannelFactory; @@ -77,6 +84,7 @@ public MockNioTransport(Settings settings, Version version, ThreadPool threadPoo PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool); } @Override @@ -96,7 +104,7 @@ protected void doStart() { boolean success = false; try { nioGroup = new NioSelectorGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, - (s) -> new TestEventHandler(this::onNonChannelException, s, System::nanoTime)); + (s) -> new TestEventHandler(this::onNonChannelException, s, transportThreadWatchdog)); ProfileSettings clientProfileSettings = new ProfileSettings(settings,
"default"); clientChannelFactory = new MockTcpChannelFactory(true, clientProfileSettings, "client"); @@ -125,6 +133,7 @@ protected void doStart() { @Override protected void stopInternal() { try { + transportThreadWatchdog.stop(); nioGroup.close(); } catch (Exception e) { logger.warn("unexpected exception while stopping nio group", e); @@ -311,4 +320,64 @@ public void sendMessage(BytesReference reference, ActionListener listener) getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } } + + static final class TransportThreadWatchdog { + + private static final long WARN_THRESHOLD = TimeUnit.MILLISECONDS.toNanos(150); + + // Only check every 2s to not flood the logs on a blocked thread. + // We mostly care about long blocks and not random slowness anyway and in tests would randomly catch slow operations that block for + // less than 2s eventually. + private static final TimeValue CHECK_INTERVAL = TimeValue.timeValueSeconds(2); + + private final ThreadPool threadPool; + private final ConcurrentHashMap registry = new ConcurrentHashMap<>(); + + private volatile boolean stopped; + + TransportThreadWatchdog(ThreadPool threadPool) { + this.threadPool = threadPool; + threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + } + + public boolean register() { + Long previousValue = registry.put(Thread.currentThread(), threadPool.relativeTimeInNanos()); + return previousValue == null; + } + + public void unregister() { + Long previousValue = registry.remove(Thread.currentThread()); + assert previousValue != null; + maybeLogElapsedTime(previousValue); + } + + private void maybeLogElapsedTime(long startTime) { + long elapsedTime = threadPool.relativeTimeInNanos() - startTime; + if (elapsedTime > WARN_THRESHOLD) { + logger.warn( + new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", + TimeUnit.NANOSECONDS.toMillis(elapsedTime)), + new RuntimeException("Slow exception on network thread")); + } + } + + private void logLongRunningExecutions() { + for (Map.Entry entry : registry.entrySet()) { + final long elapsedTime = threadPool.relativeTimeInMillis() - entry.getValue(); + if (elapsedTime > WARN_THRESHOLD) { + final Thread thread = entry.getKey(); + logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + TimeUnit.NANOSECONDS.toMillis(elapsedTime), + Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); + } + } + if (stopped == false) { + threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + } + } + + public void stop() { + stopped = true; + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java index a70ecb0c59efa..069e19c34558c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java @@ -19,9 +19,6 @@ package org.elasticsearch.transport.nio; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.NioSelector; @@ -32,185 +29,202 @@ import java.util.Collections; import java.util.Set; import 
java.util.WeakHashMap; -import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import java.util.function.LongSupplier; import java.util.function.Supplier; public class TestEventHandler extends EventHandler { - private static final Logger logger = LogManager.getLogger(TestEventHandler.class); - private final Set hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>()); private final Set hasConnectExceptionMap = Collections.newSetFromMap(new WeakHashMap<>()); - private final LongSupplier relativeNanosSupplier; + private final MockNioTransport.TransportThreadWatchdog transportThreadWatchdog; - TestEventHandler(Consumer exceptionHandler, Supplier selectorSupplier, LongSupplier relativeNanosSupplier) { + TestEventHandler(Consumer exceptionHandler, Supplier selectorSupplier, + MockNioTransport.TransportThreadWatchdog transportThreadWatchdog) { super(exceptionHandler, selectorSupplier); - this.relativeNanosSupplier = relativeNanosSupplier; + this.transportThreadWatchdog = transportThreadWatchdog; } @Override protected void acceptChannel(ServerChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.acceptChannel(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void acceptException(ServerChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.acceptException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleRegistration(ChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleRegistration(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void registrationException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.registrationException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } public void handleConnect(SocketChannelContext context) throws IOException { assert hasConnectedMap.contains(context) == false : "handleConnect should only be called is a channel is not yet connected"; - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleConnect(context); if (context.isConnectComplete()) { hasConnectedMap.add(context); } } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } public void connectException(SocketChannelContext context, Exception e) { assert hasConnectExceptionMap.contains(context) == false : "connectException should only called at maximum once per channel"; + final boolean registered = transportThreadWatchdog.register(); hasConnectExceptionMap.add(context); - long startTime = relativeNanosSupplier.getAsLong(); try { super.connectException(context, e); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + 
transportThreadWatchdog.unregister(); + } } } @Override protected void handleRead(SocketChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleRead(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void readException(SocketChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.readException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleWrite(SocketChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleWrite(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void writeException(SocketChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.writeException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleTask(Runnable task) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleTask(task); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void taskException(Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.taskException(exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleClose(ChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleClose(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void closeException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.closeException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void genericChannelException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.genericChannelException(context, exception); } finally { - maybeLogElapsedTime(startTime); - } - } - - private static final long WARN_THRESHOLD = 150; - - private void maybeLogElapsedTime(long startTime) { - long elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeNanosSupplier.getAsLong() - startTime); - if (elapsedTime > WARN_THRESHOLD) { - logger.warn(new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", elapsedTime), - new 
RuntimeException("Slow exception on network thread")); + if (registered) { + transportThreadWatchdog.unregister(); + } } } } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java index 2a570eb59b6f6..424d4922f024e 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; import java.util.Map; @@ -34,6 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; public class TestEventHandlerTests extends ESTestCase { @@ -43,12 +45,12 @@ public class TestEventHandlerTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger(TestEventHandler.class), appender); + Loggers.addAppender(LogManager.getLogger(MockNioTransport.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(LogManager.getLogger(TestEventHandler.class), appender); + Loggers.removeAppender(LogManager.getLogger(MockNioTransport.class), appender); appender.stop(); super.tearDown(); } @@ -65,7 +67,10 @@ public void testLogOnElapsedTime() throws Exception { } throw new IllegalStateException("Cannot update isStart"); }; - TestEventHandler eventHandler = new TestEventHandler((e) -> {}, () -> null, timeSupplier); + final ThreadPool threadPool = mock(ThreadPool.class); + doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos(); + TestEventHandler eventHandler = + new TestEventHandler((e) -> {}, () -> null, new MockNioTransport.TransportThreadWatchdog(threadPool)); ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); SocketChannelContext socketChannelContext = mock(SocketChannelContext.class); @@ -91,7 +96,7 @@ public void testLogOnElapsedTime() throws Exception { for (Map.Entry> entry : tests.entrySet()) { String message = "*Slow execution on network thread*"; MockLogAppender.LoggingExpectation slowExpectation = - new MockLogAppender.SeenEventExpectation(entry.getKey(), TestEventHandler.class.getCanonicalName(), Level.WARN, message); + new MockLogAppender.SeenEventExpectation(entry.getKey(), MockNioTransport.class.getCanonicalName(), Level.WARN, message); appender.addExpectation(slowExpectation); entry.getValue().run(); appender.assertAllExpectationsMatched(); From 4a9438762a562d20e938d2ea82538805f33e85b1 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 22 May 2019 14:56:14 +0100 Subject: [PATCH 201/321] Mute Data Frame integration tests Relates to https://github.com/elastic/elasticsearch/issues/42344 --- .../xpack/dataframe/integration/DataFrameTransformIT.java | 1 + .../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 ++ .../dataframe/integration/DataFrameConfigurationIndexIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 ++ .../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 ++ 
.../xpack/dataframe/integration/DataFrameTaskFailedStateIT.java | 2 ++ .../dataframe/integration/DataFrameTransformProgressIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameUsageIT.java | 2 ++ 9 files changed, 17 insertions(+) diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index ecb2025c6a9c5..cc2e8c4436e06 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -28,6 +28,7 @@ public void cleanTransforms() { cleanUp(); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 9884c9bb6793b..7dc79c1ae8fbe 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -22,6 +23,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index 681599331c8af..d7e12cf2bee4d 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -22,6 +23,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git 
a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index d9927cd09ed8f..9bac6ca0b4049 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 26a957ea055c2..5b95d1daead53 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -15,6 +16,7 @@ import java.io.IOException; import java.util.Map; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 770eaec7bd141..dab7e819881d2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git 
a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 96aeeda8755f4..7b63644dd34ad 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -19,6 +20,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index 194d35e8ba636..7d0fb179a2228 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -45,6 +46,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTransformProgressIT extends ESIntegTestCase { protected void createReviewsIndex() throws Exception { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 4f209c5a9f3f4..f98fa6a271365 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -22,6 +23,7 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; +@LuceneTestCase.AwaitsFix( bugUrl = 
"https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; From a568c3c5dac681fc93cff6c64204c3d00b3c1bb1 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 22 May 2019 15:35:08 +0100 Subject: [PATCH 202/321] [ML Data Frame] Persist data frame after state changes (#42347) --- .../transforms/DataFrameTransformTask.java | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 9df6b5e3ab337..926f233c454d1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -444,7 +444,6 @@ static class ClientDataFrameIndexer extends DataFrameIndexer { private final DataFrameTransformsCheckpointService transformsCheckpointService; private final String transformId; private final DataFrameTransformTask transformTask; - private volatile DataFrameIndexerTransformStats previouslyPersistedStats = null; private final AtomicInteger failureCount; // Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index private volatile String lastAuditedExceptionMessage = null; @@ -552,25 +551,18 @@ protected void doSaveState(IndexerState indexerState, Map positi // only every-so-often when doing the bulk indexing calls. See AsyncTwoPhaseIndexer#onBulkResponse for current periodicity ActionListener> updateClusterStateListener = ActionListener.wrap( task -> { - // Only persist the stats if something has actually changed - if (previouslyPersistedStats == null || previouslyPersistedStats.equals(getStats()) == false) { - transformsConfigManager.putOrUpdateTransformStats( - new DataFrameTransformStateAndStats(transformId, state, getStats(), - DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, state, getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null ActionListener.wrap( - r -> { - previouslyPersistedStats = getStats(); - next.run(); - }, - statsExc -> { - logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); - next.run(); - } + r -> { + next.run(); + }, + statsExc -> { + logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); + next.run(); + } )); - // The stats that we have previously written to the doc is the same as as it is now, no need to update it - } else { - next.run(); - } }, exc -> { logger.error("Updating persistent state of transform [" + transformConfig.getId() + "] failed", exc); From c1d980cf3a37ae803d2e2ef6d87450039bf0ff7c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 17:31:59 +0200 Subject: [PATCH 203/321] Fix testAutoFollowManyIndices On a slow CI worker, the test was failing an assertion. 
Closes #41234 --- .../java/org/elasticsearch/xpack/ccr/AutoFollowIT.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 4fdb1fa00ab9a..0bcb3daac6284 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -31,6 +31,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -149,7 +150,7 @@ public void testAutoFollowManyIndices() throws Exception { AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal1)); assertThat(autoFollowStats[0].getNumberOfSuccessfulFollowIndices(), equalTo(expectedVal1)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); @@ -168,7 +169,7 @@ public void testAutoFollowManyIndices() throws Exception { AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), nullValue()); assertThat(autoFollowStats[0].getAutoFollowedClusters().size(), equalTo(0)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); @@ -199,7 +200,7 @@ public void testAutoFollowManyIndices() throws Exception { // Ensure that there are no auto follow errors: // (added specifically to see that there are no leader indices auto followed multiple times) assertThat(autoFollowStats[0].getRecentAutoFollowErrors().size(), equalTo(0)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); From 145c3bec7898f9a4e9bb43ade48d9103d8e30d88 Mon Sep 17 00:00:00 2001 From: Mengwei Ding Date: Wed, 22 May 2019 08:53:25 -0700 Subject: [PATCH 204/321] Add .code_internal-* index pattern to kibana user (#42247) --- .../xpack/core/security/authz/store/ReservedRolesStore.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 2c86971b529f9..49d4159f13968 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -118,8 +118,9 @@ private static Map initializeReservedRoles() { .indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder() .indices(".management-beats").privileges("create_index", 
"read", "write").build(), + // .code_internal-* is for Code's internal worker queue index creation. RoleDescriptor.IndicesPrivileges.builder() - .indices(".code-*").privileges("all").build(), + .indices(".code-*", ".code_internal-*").privileges("all").build(), }, null, new ConditionalClusterPrivilege[] { new ManageApplicationPrivileges(Collections.singleton("kibana-*")) }, From d5888b23d73a245f40fa124a39d474b34c042156 Mon Sep 17 00:00:00 2001 From: mushao999 Date: Thu, 23 May 2019 00:05:48 +0800 Subject: [PATCH 205/321] Fix alpha version error message (#40406) --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 5089a7fe0cec9..ce0fc1559c18b 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -257,7 +257,7 @@ public static Version fromString(String version) { if (buildStr.startsWith("alpha")) { assert rawMajor >= 5 : "major must be >= 5 but was " + major; build = Integer.parseInt(buildStr.substring(5)); - assert build < 25 : "expected a beta build but " + build + " >= 25"; + assert build < 25 : "expected a alpha build but " + build + " >= 25"; } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) { build = betaOffset + Integer.parseInt(buildStr.substring(4)); assert build < 50 : "expected a beta build but " + build + " >= 50"; From 148df31639a983058b758f5eef2c9df2f9346e94 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Wed, 22 May 2019 09:19:14 -0700 Subject: [PATCH 206/321] Fix a rendering issue in the geo envelope docs. (#42332) Previously the formatting information didn't display in the docs, and the sentence just rendered as "bounding rectangle in the format :". --- docs/reference/mapping/types/geo-shape.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 1cf85e305a95d..26f59e1058c09 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -615,7 +615,7 @@ POST /example/_doc Elasticsearch supports an `envelope` type, which consists of coordinates for upper left and lower right points of the shape to represent a -bounding rectangle in the format [[minLon, maxLat],[maxLon, minLat]]: +bounding rectangle in the format `[[minLon, maxLat], [maxLon, minLat]]`: [source,js] -------------------------------------------------- From 943344fa48d8d1f83776863250af8b8fb52417fd Mon Sep 17 00:00:00 2001 From: swstepp <49322243+swstepp@users.noreply.github.com> Date: Wed, 22 May 2019 10:44:41 -0600 Subject: [PATCH 207/321] Fix grammar problem in stemming reference. (#42148) --- docs/reference/how-to/recipes/stemming.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index e8c213646578c..d7ddda116327e 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -171,7 +171,7 @@ the query need to be matched exactly while other parts should still take stemming into account? Fortunately, the `query_string` and `simple_query_string` queries have a feature -that solve this exact problem: `quote_field_suffix`. 
This tell Elasticsearch +that solves this exact problem: `quote_field_suffix`. This tells Elasticsearch that the words that appear in between quotes are to be redirected to a different field, see below: From 458aa6409f37ea636c1b099c99ff4369599cb17c Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Wed, 22 May 2019 11:58:50 -0500 Subject: [PATCH 208/321] add 7_3 as version (#42368) --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index ce0fc1559c18b..e3381a3384c0e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -92,6 +92,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_3_0_ID = 7030099; + public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_8_0_0_ID = 8000099; public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version CURRENT = V_8_0_0; @@ -110,6 +112,8 @@ public static Version fromId(int id) { switch (id) { case V_8_0_0_ID: return V_8_0_0; + case V_7_3_0_ID: + return V_7_3_0; case V_7_2_0_ID: return V_7_2_0; case V_7_1_1_ID: From d49d9b53d6e0ac8acda61913489fa55e5118f0c5 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 22 May 2019 19:07:56 +0200 Subject: [PATCH 209/321] Ensure testAckedIndexing uses disruption index settings AbstractDisruptionTestCase set a lower global checkpoint sync interval setting, but this was ignored by testAckedIndexing, which has led to spurious test failures Relates #41068, #38931 --- .../indices/recovery/PeerRecoveryTargetService.java | 2 +- .../java/org/elasticsearch/discovery/ClusterDisruptionIT.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 1ba854fdb2b13..6b1a893667f2c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -356,7 +356,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove logger.trace("{} preparing for file-based recovery from [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode()); } else { logger.trace( - "{} preparing for sequence-number-based recovery starting at local checkpoint [{}] from [{}]", + "{} preparing for sequence-number-based recovery starting at sequence number [{}] from [{}]", recoveryTarget.shardId(), startingSeqNo, recoveryTarget.sourceNode()); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index ad3b8006ed0c3..5bc5efc96c661 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -119,6 +119,7 @@ public void 
testAckedIndexing() throws Exception { assertAcked(prepareCreate("test") .setSettings(Settings.builder() + .put(indexSettings()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) )); From 35c4c9efb0705e1e6b080d16d78ed0c5967b80e6 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 21 May 2019 10:25:23 -0400 Subject: [PATCH 210/321] Re-mute all ml_datafeed_crud rolling upgrade tests AwaitsFix https://github.com/elastic/elasticsearch/issues/42258 Thought this was fixed, but throwing deprecation warnings at an unexpected time so putting this back on mute until we figure it out. --- .../rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml | 5 +++++ .../rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml | 5 +++++ .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 2ff9b08e9b13f..4d2254a1ba8c3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Test old cluster datafeed without aggs": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 4918dde9ba899..62a9d33a511e6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,8 @@ +setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + --- "Put job and datafeed without aggs in old cluster": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 5dc71ecb0679e..4b742e10de61f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,4 +1,8 @@ setup: + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42258" + - do: cluster.health: wait_for_status: green From c9d04ccb3a13eeaccdf056df0243477d06da013d Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 22 May 2019 22:00:51 +0300 Subject: [PATCH 211/321] Make packer cache branches explicit (#41990) Before this change we would recurse to cache bwc versions. This proved to be problematic due to the number of steps it was generating taking too long. Also this required tricky maintenance to break the recursion for old branches we don't really care about. With this change we now cache specific branches only. 
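In outline, only master-like versions (where minor and revision are both 0) now wire up the cross-project dependency resolution, so older branches stop recursing into their own bwc versions. A sketch of the resulting guard, mirroring the build.gradle diff below:

    // Gradle/Groovy sketch of the guard added in distribution/bwc/build.gradle:
    // only x.0.0 (master-like) versions cascade BWC dependency resolution
    Version currentVersion = Version.fromString(version)
    if (currentVersion.getMinor() == 0 && currentVersion.getRevision() == 0) {
        resolveAllDependencies.dependsOn resolveAllBwcDependencies
    }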
--- .ci/packer_cache.sh | 1 + distribution/bwc/build.gradle | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 4533213920c3a..adc4f80d4960d 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -22,3 +22,4 @@ export JAVA8_HOME="${HOME}"/.java/java8 export JAVA11_HOME="${HOME}"/.java/java11 export JAVA12_HOME="${HOME}"/.java/openjdk12 ./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies + diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 87644fb7f6785..1a4e4161418ab 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -239,12 +239,15 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased createBuildBwcTask(projectName, "${baseDir}/${projectName}", projectArtifact) } - createRunBwcGradleTask("resolveAllBwcDependencies") { args 'resolveAllDependencies' } - resolveAllDependencies.dependsOn resolveAllBwcDependencies + Version currentVersion = Version.fromString(version) + if (currentVersion.getMinor() == 0 && currentVersion.getRevision() == 0) { + // We only want to resolve dependencies for live versions of master, without cascading this to older versions + resolveAllDependencies.dependsOn resolveAllBwcDependencies + } for (e in artifactFiles) { String projectName = e.key From da77b97c56c948fea5909e60170a1680c791ce1b Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 23 May 2019 08:42:06 +0100 Subject: [PATCH 212/321] [ML Data Frame] Account for completed data frames in test (#42351) When asserting on the checkpoint value if the DF has completed the checkpoint will be 1 else 0. Similarly state may be started or indexing. Closes #42309 --- .../rest-api-spec/test/data_frame/transforms_stats.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index f552e4710c781..79aa14cb6f628 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -42,9 +42,6 @@ teardown: --- "Test get transform stats": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-stats" @@ -52,7 +49,7 @@ teardown: - match: { transforms.0.id: "airline-transform-stats" } - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - - match: { transforms.0.state.checkpoint: 0 } + - lte: { transforms.0.state.checkpoint: 1 } - lte: { transforms.0.stats.pages_processed: 1 } - match: { transforms.0.stats.documents_processed: 0 } - match: { transforms.0.stats.documents_indexed: 0 } @@ -149,9 +146,6 @@ teardown: --- "Test get multiple transform stats where one does not have a task": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42309" - do: data_frame.put_data_frame_transform: transform_id: "airline-transform-stats-dos" @@ -169,7 +163,7 @@ teardown: transform_id: "*" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" 
} - match: { transforms.1.id: "airline-transform-stats-dos" } - match: { transforms.1.state.indexer_state: "stopped" } From e75ff0c748e6b68232c2b08e19ac4a4934918264 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Thu, 23 May 2019 10:10:07 +0200 Subject: [PATCH 213/321] Allow `fields` to be set to `*` (#42301) Allow for SimpleQueryString, QueryString and MultiMatchQuery to set the `fields` parameter to the wildcard `*`. If so, set the leniency to `true`, to achieve the same behaviour as from the `"default_field" : "*" setting. Furthermore, check if `*` is in the list of the `default_field` but not necessarily as the 1st element. Closes: #39577 --- .../index/query/MultiMatchQueryBuilder.java | 15 +- .../index/query/QueryStringQueryBuilder.java | 9 +- .../index/query/SimpleQueryStringBuilder.java | 12 +- .../index/search/QueryParserHelper.java | 8 ++ .../query/MultiMatchQueryBuilderTests.java | 128 ++++++++++++------ .../query/QueryStringQueryBuilderTests.java | 55 +++++++- .../query/SimpleQueryStringBuilderTests.java | 79 ++++++++--- 7 files changed, 227 insertions(+), 79 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 5537df2fdf874..7827c032ea0d7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -783,18 +782,20 @@ protected Query doToQuery(QueryShardContext context) throws IOException { multiMatchQuery.setTranspositions(fuzzyTranspositions); Map newFieldsBoosts; + boolean isAllField; if (fieldsBoosts.isEmpty()) { // no fields provided, defaults to index.query.default_field List defaultFields = context.defaultFields(); - boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0)); - if (isAllField && lenient == null) { - // Sets leniency to true if not explicitly - // set in the request - multiMatchQuery.setLenient(true); - } newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, QueryParserHelper.parseFieldsAndWeights(defaultFields)); + isAllField = QueryParserHelper.hasAllFieldsWildcard(defaultFields); } else { newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, fieldsBoosts); + isAllField = QueryParserHelper.hasAllFieldsWildcard(fieldsBoosts.keySet()); + } + if (isAllField && lenient == null) { + // Sets leniency to true if not explicitly + // set in the request + multiMatchQuery.setLenient(true); } return multiMatchQuery.parse(type, newFieldsBoosts, value, minimumShouldMatch); } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index f129ccbec7254..1d1d139ceef1c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -847,11 +847,14 @@ protected Query doToQuery(QueryShardContext context) throws IOException { } 
} else if (fieldsAndWeights.size() > 0) { final Map resolvedFields = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights); - queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient); + if (QueryParserHelper.hasAllFieldsWildcard(fieldsAndWeights.keySet())) { + queryParser = new QueryStringQueryParser(context, resolvedFields, lenient == null ? true : lenient); + } else { + queryParser = new QueryStringQueryParser(context, resolvedFields, isLenient); + } } else { List defaultFields = context.defaultFields(); - boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0)); - if (isAllField) { + if (QueryParserHelper.hasAllFieldsWildcard(defaultFields)) { queryParser = new QueryStringQueryParser(context, lenient == null ? true : lenient); } else { final Map resolvedFields = QueryParserHelper.resolveMappingFields(context, diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index bd74d34196345..beae19a4403ac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.search.QueryParserHelper; @@ -399,16 +398,19 @@ public SimpleQueryStringBuilder fuzzyTranspositions(boolean fuzzyTranspositions) protected Query doToQuery(QueryShardContext context) throws IOException { Settings newSettings = new Settings(settings); final Map resolvedFieldsAndWeights; + boolean isAllField; if (fieldsAndWeights.isEmpty() == false) { resolvedFieldsAndWeights = QueryParserHelper.resolveMappingFields(context, fieldsAndWeights); + isAllField = QueryParserHelper.hasAllFieldsWildcard(fieldsAndWeights.keySet()); } else { List defaultFields = context.defaultFields(); - boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0)); - if (isAllField) { - newSettings.lenient(lenientSet ? settings.lenient() : true); - } resolvedFieldsAndWeights = QueryParserHelper.resolveMappingFields(context, QueryParserHelper.parseFieldsAndWeights(defaultFields)); + isAllField = QueryParserHelper.hasAllFieldsWildcard(defaultFields); + } + + if (isAllField) { + newSettings.lenient(lenientSet ? settings.lenient() : true); } final SimpleQueryStringQueryParser sqp; diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index adc1691608b23..3acf2929687c5 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -161,4 +161,12 @@ private static void checkForTooManyFields(Map fields, QueryShardC throw new IllegalArgumentException("field expansion matches too many fields, limit: " + limit + ", got: " + fields.size()); } } + + /** + * Returns true if any of the fields is the wildcard {@code *}, false otherwise. 
+ * @param fields A collection of field names + */ + public static boolean hasAllFieldsWildcard(Collection fields) { + return fields.stream().anyMatch(Regex::isMatchAllPattern); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index ab9b3c732135d..6590a5609353a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -55,6 +55,7 @@ import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.collection.IsCollectionWithSize.hasSize; @@ -409,52 +410,79 @@ public void testToFuzzyQuery() throws Exception { public void testDefaultField() throws Exception { QueryShardContext context = createShardContext(); MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("hello"); - // should pass because we set lenient to true when default field is `*` + // default value `*` sets leniency to true Query query = builder.toQuery(context); - assertThat(query, instanceOf(DisjunctionMaxQuery.class)); - - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), - Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5") - .build()) - ); - - MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello"); - query = qb.toQuery(context); - DisjunctionMaxQuery expected = new DisjunctionMaxQuery( - Arrays.asList( - new TermQuery(new Term(STRING_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) - ), 0.0f - ); - assertEquals(expected, query); + assertQueryWithAllFieldsWildcard(query); + + try { + // `*` is in the list of the default_field => leniency set to true + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build()) + ); + query = new MultiMatchQueryBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5") + .build()) + ); + MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello"); + query = qb.toQuery(context); + DisjunctionMaxQuery expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) + ), 0.0f + ); + assertEquals(expected, query); + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build()) + ); + // should fail because lenient defaults to false + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context)); + assertThat(exc, instanceOf(NumberFormatException.class)); + assertThat(exc.getMessage(), 
equalTo("For input string: \"hello\"")); + + // explicitly sets lenient + qb.lenient(true); + query = qb.toQuery(context); + expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f), + new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]") + ), 0.0f + ); + assertEquals(expected, query); + + } finally { + // Reset to the default value + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putNull("index.query.default_field").build()) + ); + } + } - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), - Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build()) - ); - // should fail because lenient defaults to false - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context)); - assertThat(exc, instanceOf(NumberFormatException.class)); - assertThat(exc.getMessage(), equalTo("For input string: \"hello\"")); - - // explicitly sets lenient - qb.lenient(true); - query = qb.toQuery(context); - expected = new DisjunctionMaxQuery( - Arrays.asList( - new TermQuery(new Term(STRING_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f), - new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]") - ), 0.0f - ); - assertEquals(expected, query); + public void testAllFieldsWildcard() throws Exception { + QueryShardContext context = createShardContext(); + Query query = new MultiMatchQueryBuilder("hello") + .field("*") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), - Settings.builder().putNull("index.query.default_field").build()) - ); + query = new MultiMatchQueryBuilder("hello") + .field(STRING_FIELD_NAME) + .field("*") + .field(STRING_FIELD_NAME_2) + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); } public void testWithStopWords() throws Exception { @@ -536,4 +564,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings .build(); return IndexMetaData.builder(name).settings(build).build(); } + + private void assertQueryWithAllFieldsWildcard(Query query) { + assertEquals(DisjunctionMaxQuery.class, query.getClass()); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; + int noMatchNoDocsQueries = 0; + for (Query q : disjunctionMaxQuery.getDisjuncts()) { + if (q.getClass() == MatchNoDocsQuery.class) { + noMatchNoDocsQueries++; + } + } + assertEquals(11, noMatchNoDocsQueries); + assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 001df6deb5647..ee4e0f9540451 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -79,6 +79,7 @@ import 
static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @@ -1255,12 +1256,27 @@ public void testUnmappedFieldRewriteToMatchNoDocs() throws IOException { public void testDefaultField() throws Exception { QueryShardContext context = createShardContext(); - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) - ); + // default value `*` sets leniency to true + Query query = new QueryStringQueryBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + try { - Query query = new QueryStringQueryBuilder("hello") + // `*` is in the list of the default_field => leniency set to true + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build()) + ); + query = new QueryStringQueryBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) + ); + query = new QueryStringQueryBuilder("hello") .toQuery(context); Query expected = new DisjunctionMaxQuery( Arrays.asList( @@ -1278,6 +1294,21 @@ public void testDefaultField() throws Exception { } } + public void testAllFieldsWildcard() throws Exception { + QueryShardContext context = createShardContext(); + Query query = new QueryStringQueryBuilder("hello") + .field("*") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + query = new QueryStringQueryBuilder("hello") + .field(STRING_FIELD_NAME) + .field("*") + .field(STRING_FIELD_NAME_2) + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + } + /** * the quote analyzer should overwrite any other forced analyzer in quoted parts of the query */ @@ -1513,4 +1544,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings .build(); return IndexMetaData.builder(name).settings(build).build(); } + + private void assertQueryWithAllFieldsWildcard(Query query) { + assertEquals(DisjunctionMaxQuery.class, query.getClass()); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; + int noMatchNoDocsQueries = 0; + for (Query q : disjunctionMaxQuery.getDisjuncts()) { + if (q.getClass() == MatchNoDocsQuery.class) { + noMatchNoDocsQueries++; + } + } + assertEquals(11, noMatchNoDocsQueries); + assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 0adac9db8287e..ab479d89fe9d6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -56,6 +56,7 @@ import java.util.Set; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -576,24 +577,56 @@ public void testQuoteFieldSuffix() { public void testDefaultField() throws Exception { QueryShardContext context = createShardContext(); - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) - ); + // default value `*` sets leniency to true Query query = new SimpleQueryStringBuilder("hello") .toQuery(context); - Query expected = new DisjunctionMaxQuery( - Arrays.asList( - new TermQuery(new Term(STRING_FIELD_NAME, "hello")), - new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) - ), 1.0f - ); - assertEquals(expected, query); - // Reset the default value - context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", - context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", "*").build()) - ); + assertQueryWithAllFieldsWildcard(query); + + try { + // `*` is in the list of the default_field => leniency set to true + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, "*", STRING_FIELD_NAME_2).build()) + ); + query = new SimpleQueryStringBuilder("hello") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) + ); + query = new SimpleQueryStringBuilder("hello") + .toQuery(context); + Query expected = new DisjunctionMaxQuery( + Arrays.asList( + new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f) + ), 1.0f + ); + assertEquals(expected, query); + } finally { + // Reset to the default value + context.getIndexSettings().updateIndexMetaData( + newIndexMeta("index", + context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", "*").build()) + ); + } + } + + public void testAllFieldsWildcard() throws Exception { + QueryShardContext context = createShardContext(); + Query query = new SimpleQueryStringBuilder("hello") + .field("*") + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); + + query = new SimpleQueryStringBuilder("hello") + .field(STRING_FIELD_NAME) + .field("*") + .field(STRING_FIELD_NAME_2) + .toQuery(context); + assertQueryWithAllFieldsWildcard(query); } public void testToFuzzyQuery() throws Exception { @@ -739,4 +772,18 @@ private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings .build(); return IndexMetaData.builder(name).settings(build).build(); } + + private void assertQueryWithAllFieldsWildcard(Query query) { + assertEquals(DisjunctionMaxQuery.class, query.getClass()); + DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) query; + int noMatchNoDocsQueries = 0; + for (Query q : disjunctionMaxQuery.getDisjuncts()) { + if 
(q.getClass() == MatchNoDocsQuery.class) { + noMatchNoDocsQueries++; + } + } + assertEquals(11, noMatchNoDocsQueries); + assertThat(disjunctionMaxQuery.getDisjuncts(), hasItems(new TermQuery(new Term(STRING_FIELD_NAME, "hello")), + new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")))); + } } From f5a6aa7ad7bba178b7ac1d5aa34fac84ab097b43 Mon Sep 17 00:00:00 2001 From: markharwood Date: Thu, 23 May 2019 10:34:13 +0100 Subject: [PATCH 214/321] Test fix - results equality failed because of subtle scoring differences between replicas. (#42366) Diverging merge policies means the segments and therefore scores are not the same. Fixed the test by ensuring there are zero replicas. Closes #32492 --- .../elasticsearch/search/profile/query/QueryProfilerIT.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 664f5a09fa947..040e16b6e957f 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -110,7 +111,9 @@ public void testProfileQuery() throws Exception { * to make sure the profiling doesn't interfere with the hits being returned */ public void testProfileMatchesRegular() throws Exception { - createIndex("test"); + createIndex("test", Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build()); ensureGreen(); int numDocs = randomIntBetween(100, 150); From 4e999d7514e701c7cf5790e9b484d9f8d7b83297 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 23 May 2019 11:41:05 +0200 Subject: [PATCH 215/321] Upgrade to Lucene 8.1.0 (#42214) This commit upgrades to the GA release of Lucene 8.1.0 --- buildSrc/version.properties | 2 +- ...ene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-expressions-8.1.0.jar.sha1 | 1 + ...e-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-icu-8.1.0.jar.sha1 | 1 + ...lyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 | 1 + ...-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-nori-8.1.0.jar.sha1 | 1 + ...lyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 | 1 + ...alyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 | 1 + ...alyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 | 1 + ...zers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../lucene-analyzers-morfologik-8.1.0.jar.sha1 | 1 + ...nalyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../licenses/lucene-analyzers-common-8.1.0.jar.sha1 | 1 + ...backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 | 1 + .../lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - 
server/licenses/lucene-core-8.1.0.jar.sha1 | 1 + ...lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-grouping-8.1.0.jar.sha1 | 1 + ...ene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-highlighter-8.1.0.jar.sha1 | 1 + .../lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-join-8.1.0.jar.sha1 | 1 + .../lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-memory-8.1.0.jar.sha1 | 1 + .../lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-misc-8.1.0.jar.sha1 | 1 + .../lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-queries-8.1.0.jar.sha1 | 1 + ...ene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-queryparser-8.1.0.jar.sha1 | 1 + .../lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-sandbox-8.1.0.jar.sha1 | 1 + .../lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-spatial-8.1.0.jar.sha1 | 1 + ...-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 | 1 + ...ucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-spatial3d-8.1.0.jar.sha1 | 1 + .../lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - server/licenses/lucene-suggest-8.1.0.jar.sha1 | 1 + .../org/elasticsearch/index/codec/CodecService.java | 3 +-- .../codec/PerFieldMappingPostingFormatCodec.java | 3 +-- .../index/engine/InternalEngineTests.java | 11 ----------- .../elasticsearch/validate/SimpleValidateQueryIT.java | 4 ++-- .../lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 | 1 - .../sql-action/licenses/lucene-core-8.1.0.jar.sha1 | 1 + 53 files changed, 29 insertions(+), 42 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 
server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-core-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-grouping-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-join-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-memory-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-misc-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-queries-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-spatial-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-8.1.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 server/licenses/lucene-suggest-8.1.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 471cb3a705cf5..a3214c789a47d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.0.0 -lucene = 8.1.0-snapshot-e460356abe +lucene = 8.1.0 bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691 diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 48446e877e309..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1addebde14147501b7d24a581a7a7288bc585d \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..2554e8ce52652 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.1.0.jar.sha1 @@ -0,0 +1 @@ +0c98e3b9d25f27ab05ac643cfb28756daa516bc7 \ No newline at end of file diff --git 
a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index c03380c6cf36c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8b7d744e6294706b379ec7fdd2d6f1b6badc95b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..e4657681667f1 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.1.0.jar.sha1 @@ -0,0 +1 @@ +d61364290eb1c28696e62b7df3a7d041d3be2fa5 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index e3195509e493f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9dcc5568ccd4589f4a6871d2019661546106c83 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..fff37598a0861 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.1.0.jar.sha1 @@ -0,0 +1 @@ +7f78b18890a0a0e74a8249806a6cfcabd2fae304 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 77cd0b32ed9ea..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bef6d901a9c8b4c6139de7419b3024e0c9fd4ad3 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..47b0c633fdc79 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.1.0.jar.sha1 @@ -0,0 +1 @@ +bfc6b5d67a792aa23ee134fe93307696aad94223 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 1f090e9ca523f..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -074c06d6f2254edae04acdd53bf6989a4343acc8 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..d24096b883fc9 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.1.0.jar.sha1 @@ -0,0 +1 @@ +6fac1ff799b86f872b67e7fad55120d338daa86f \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 
b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 42a1e22cdfbc0..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5cd2a341ab4524ec7ff40ba29faa4ead5e805413 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..9ed51a53f6226 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.1.0.jar.sha1 @@ -0,0 +1 @@ +72941af5e1bfb012aec04dd518a2deb43402702c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index c2468bbdd7cac..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba55aba7d278f6201b4ebd6dafbc7edb6fe94f8c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..190a7031928b8 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.1.0.jar.sha1 @@ -0,0 +1 @@ +0ac885595cfdc0267d7d9cb843c22dabf7215ff0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 176e9533edde9..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -543d99fd2ba4302f3555792236350b201514d821 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..7f2d4c5e8647e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.1.0.jar.sha1 @@ -0,0 +1 @@ +e260cff7f48e350e1ec037dec1c260ce05ddb53e \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 08507536ac134..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c20a8ae0c3bd769aa6c415ebea94ba466d9a631d \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..6eb7722fec744 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.1.0.jar.sha1 @@ -0,0 +1 @@ +262f20cb2786cdf7015a4ba1a64ce90ff2d746f5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 6b0a3854c6f38..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.1.0-snapshot-e460356abe.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -6e8921ab37facdcc5c4b71f2612d72300d6de217 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..c232e0fbdfdb9 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.1.0.jar.sha1 @@ -0,0 +1 @@ +c5610306f8eff182b399b9aed7a60b82668a8395 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index fea3658cf61bd..0000000000000 --- a/server/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e85f77d8f8ed1db53dba387fbdec55a9f912639 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.1.0.jar.sha1 b/server/licenses/lucene-core-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..4a6aa7b098686 --- /dev/null +++ b/server/licenses/lucene-core-8.1.0.jar.sha1 @@ -0,0 +1 @@ +46d614acdeb42f4661e91347100217bc72aae11e \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 0bcc1ebab16de..0000000000000 --- a/server/licenses/lucene-grouping-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -426a1822d888a6341f6bafccaad19e4a2ad88e25 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.1.0.jar.sha1 b/server/licenses/lucene-grouping-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..f3c49cb193aba --- /dev/null +++ b/server/licenses/lucene-grouping-8.1.0.jar.sha1 @@ -0,0 +1 @@ +443f63d9038eea0601b493fa37fc599d74b035eb \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index b2478a52c7a85..0000000000000 --- a/server/licenses/lucene-highlighter-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f83fa4b264198dfb12436a803309a60f5588481d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.1.0.jar.sha1 b/server/licenses/lucene-highlighter-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..6b174859e1834 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.1.0.jar.sha1 @@ -0,0 +1 @@ +e3e52591f8d44a4e1006ced4dd4a67f7a572990a \ No newline at end of file diff --git a/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index ea3f6353ce09e..0000000000000 --- a/server/licenses/lucene-join-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f381131abef51f77d26bccbb213d1c8563c19ec4 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.1.0.jar.sha1 b/server/licenses/lucene-join-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..75232f1fc0a72 --- /dev/null +++ b/server/licenses/lucene-join-8.1.0.jar.sha1 @@ -0,0 +1 @@ +2e885b1e3e55f94ccc2744f85738563a577a4e21 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 0bc96c932c18b..0000000000000 --- a/server/licenses/lucene-memory-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8733551b9eb71e1f59688b8e78e0b481974d7a \ No newline at end of 
file diff --git a/server/licenses/lucene-memory-8.1.0.jar.sha1 b/server/licenses/lucene-memory-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..4b2c65af32da5 --- /dev/null +++ b/server/licenses/lucene-memory-8.1.0.jar.sha1 @@ -0,0 +1 @@ +e58d0092da1c4744627d57d022f4e07d8b80d11b \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index fdde3da39a264..0000000000000 --- a/server/licenses/lucene-misc-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13da0b22f01dff4a01c9907425464a440695104b \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.1.0.jar.sha1 b/server/licenses/lucene-misc-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..37afcfadb7e12 --- /dev/null +++ b/server/licenses/lucene-misc-8.1.0.jar.sha1 @@ -0,0 +1 @@ +07833aee2c5feb6fa1a16a21d27c8f15c01d0b4c \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index c50232482b5c1..0000000000000 --- a/server/licenses/lucene-queries-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c3de4dbb98b5cc00875d76e817929374bb9e710 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.1.0.jar.sha1 b/server/licenses/lucene-queries-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..7f09849b67693 --- /dev/null +++ b/server/licenses/lucene-queries-8.1.0.jar.sha1 @@ -0,0 +1 @@ +63096d40298b8b8245a602d344b57bfa14b929fd \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 4a6c53845fc24..0000000000000 --- a/server/licenses/lucene-queryparser-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -539ef199c74ae6891ac93f55632fe140b9d4c291 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.1.0.jar.sha1 b/server/licenses/lucene-queryparser-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..ada3ec974e031 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.1.0.jar.sha1 @@ -0,0 +1 @@ +9bb4fb3c7035a877e4a87ed86870894509d26d65 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 198b93230fb7c..0000000000000 --- a/server/licenses/lucene-sandbox-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0371141f658e2157babd490f0a8ddbcd5114b371 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.1.0.jar.sha1 b/server/licenses/lucene-sandbox-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..422195c73c69d --- /dev/null +++ b/server/licenses/lucene-sandbox-8.1.0.jar.sha1 @@ -0,0 +1 @@ +1033737c97703516134ba4c99d41724729854df4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index ad6558f167d1c..0000000000000 --- a/server/licenses/lucene-spatial-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1bae56fbce29d6c597c00889dab1909f51f4aaac \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.1.0.jar.sha1 
b/server/licenses/lucene-spatial-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..e0d8f362a1ecf --- /dev/null +++ b/server/licenses/lucene-spatial-8.1.0.jar.sha1 @@ -0,0 +1 @@ +968d2fb35b0c2e68ac07c1ec187ab38a74b6602a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 66d5cc808a1ac..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6eaed1dea9a18502ab9dffe55f081da6060373f7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..0a45cfe117a3a --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.1.0.jar.sha1 @@ -0,0 +1 @@ +551b7fa327645d3fd59ae1321320153b2f858766 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 0e1c69171e07e..0000000000000 --- a/server/licenses/lucene-spatial3d-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e54c6be78275637544a3080874dd04b0d92755e5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..9cdde5a308e22 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.1.0.jar.sha1 @@ -0,0 +1 @@ +45e63df708be458e95d9da3e6054189c50c30dff \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 b/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index 2d1491c40dd0d..0000000000000 --- a/server/licenses/lucene-suggest-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4c95d0bb740f18af520faebcebb968da3e0a687 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.1.0.jar.sha1 b/server/licenses/lucene-suggest-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..c4ac6e68080ab --- /dev/null +++ b/server/licenses/lucene-suggest-8.1.0.jar.sha1 @@ -0,0 +1 @@ +d5cd0e619b473e132f03e3577d1b422f050f99c0 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java index c43f733f916cb..485c40d5d9bbd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.elasticsearch.common.Nullable; @@ -49,7 +48,7 @@ public CodecService(@Nullable MapperService mapperService, Logger logger) { final var codecs = new HashMap(); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene80Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION, FSTLoadMode.AUTO)); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, 
mapperService, logger)); diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index 705141f1fb925..4a154abd8eadd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.elasticsearch.common.lucene.Lucene; @@ -48,7 +47,7 @@ public class PerFieldMappingPostingFormatCodec extends Lucene80Codec { } public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) { - super(compressionMode, FSTLoadMode.AUTO); + super(compressionMode); this.mapperService = mapperService; this.logger = logger; } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 82c4035cfa7db..db9de3765b1e7 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -28,8 +28,6 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; -import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; @@ -133,7 +131,6 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.VersionUtils; import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; import java.io.Closeable; import java.io.IOException; @@ -302,14 +299,6 @@ public void testSegments() throws Exception { assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); assertThat(segments.get(0).isCompound(), equalTo(true)); assertThat(segments.get(0).ramTree, nullValue()); - assertThat(segments.get(0).getAttributes().keySet(), - Matchers.contains( - // TODO: Lucene50PostingsFormat#MODE_KEY should be public ? 
- Lucene50PostingsFormat.class.getSimpleName() + ".fstMode", - Lucene50StoredFieldsFormat.MODE_KEY - ) - ); - engine.flush(); segments = engine.segments(false); diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 54d9a015b4e4a..5f730ad138f96 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -207,13 +207,13 @@ public void testExplainWithRewriteValidateQuery() throws Exception { // common terms queries assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin (field:huge field:brown)"), true); + containsString("+field:pidgin field:huge field:brown"), true); assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); // match queries with cutoff frequency assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin (field:huge field:brown)"), true); + containsString("+field:pidgin field:huge field:brown"), true); assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 deleted file mode 100644 index fea3658cf61bd..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0-snapshot-e460356abe.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e85f77d8f8ed1db53dba387fbdec55a9f912639 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 new file mode 100644 index 0000000000000..4a6aa7b098686 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.1.0.jar.sha1 @@ -0,0 +1 @@ +46d614acdeb42f4661e91347100217bc72aae11e \ No newline at end of file From 72ab7b6f26b8f76f9534db3ca8ecd6633b271eb9 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 23 May 2019 11:52:39 +0200 Subject: [PATCH 216/321] Rename SearchRequest#crossClusterSearch (#42363) The SearchRequest#crossClusterSearch method is currently used only as part of a cross-cluster search request, when minimizing roundtrips. It will soon be used also when splitting a search into two: one for throttled and one for non-throttled indices. It will probably be used for other use cases as well in the future, hence it makes sense to generalize its name to subSearchRequest.
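For orientation, a minimal caller-side sketch of the behavior this rename supports (illustrative only, not part of the patch; it assumes the public 7.x `SearchRequest#setCcsMinimizeRoundtrips` setter and a remote cluster registered under the made-up alias `remote1`): with minimized roundtrips, the coordinating node forks one sub-request per cluster via the renamed helper, each executed with a non-final reduce, and merges the partial responses into a single response.

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    // One logical search spanning a remote alias and local indices; internally it is
    // split into per-cluster sub-requests whose reductions are not final, so the
    // coordinating node can perform the final reduction when merging the responses.
    SearchRequest request = new SearchRequest("remote1:logs-*", "logs-*");
    request.setCcsMinimizeRoundtrips(true);
    request.source(new SearchSourceBuilder().query(QueryBuilders.matchQuery("message", "error")));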
--- .../action/search/SearchRequest.java | 15 ++++++++------- .../action/search/TransportSearchAction.java | 6 +++--- .../action/search/SearchPhaseControllerTests.java | 2 +- .../action/search/SearchRequestTests.java | 14 +++++++------- .../TransportSearchActionSingleNodeTests.java | 14 +++++++------- 5 files changed, 26 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 6b641906d2e32..53dafc153fc4b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -134,9 +134,10 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { } /** - * Creates a new search request by providing the search request to copy all fields from, the indices to search against, the alias of - * the cluster where it will be executed, as well as the start time in milliseconds from the epoch time and whether the reduction - * should be final or not. Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request + * Creates a new sub-search request starting from the original search request that is provided. + * For internal use only, allows to fork a search request into multiple search requests that will be executed independently. + * Such requests will not be finally reduced, so that their results can be merged together in one response at completion. + * Used when a {@link SearchRequest} is created and executed as part of a cross-cluster search request * performing reduction on each cluster in order to minimize network round-trips between the coordinating node and the remote clusters. * * @param originalSearchRequest the original search request @@ -145,8 +146,8 @@ public SearchRequest(String[] indices, SearchSourceBuilder source) { * @param absoluteStartMillis the absolute start time to be used on the remote clusters to ensure that the same value is used * @param finalReduce whether the reduction should be final or not */ - static SearchRequest crossClusterSearch(SearchRequest originalSearchRequest, String[] indices, - String clusterAlias, long absoluteStartMillis, boolean finalReduce) { + static SearchRequest subSearchRequest(SearchRequest originalSearchRequest, String[] indices, + String clusterAlias, long absoluteStartMillis, boolean finalReduce) { Objects.requireNonNull(originalSearchRequest, "search request must not be null"); validateIndices(indices); Objects.requireNonNull(clusterAlias, "cluster alias must not be null"); @@ -284,7 +285,7 @@ boolean isFinalReduce() { /** * Returns the current time in milliseconds from the time epoch, to be used for the execution of this search request. Used to * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search - * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns + * request. When created through {@link #subSearchRequest(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. 
*/ long getOrCreateAbsoluteStartMillis() { @@ -292,7 +293,7 @@ long getOrCreateAbsoluteStartMillis() { } /** - * Returns the provided absoluteStartMillis when created through {@link #crossClusterSearch} and + * Returns the provided absoluteStartMillis when created through {@link #subSearchRequest} and * -1 otherwise. */ long getAbsoluteStartMillis() { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index d37e10a71f3b9..a7c0a785c7fce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -270,7 +270,7 @@ static void ccsRemoteReduce(SearchRequest searchRequest, OriginalIndices localIn String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); OriginalIndices indices = entry.getValue(); - SearchRequest ccsSearchRequest = SearchRequest.crossClusterSearch(searchRequest, indices.indices(), + SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(), clusterAlias, timeProvider.getAbsoluteStartMillis(), true); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); remoteClusterClient.search(ccsSearchRequest, new ActionListener() { @@ -306,7 +306,7 @@ public void onFailure(Exception e) { String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); OriginalIndices indices = entry.getValue(); - SearchRequest ccsSearchRequest = SearchRequest.crossClusterSearch(searchRequest, indices.indices(), + SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(), clusterAlias, timeProvider.getAbsoluteStartMillis(), false); ActionListener ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); @@ -316,7 +316,7 @@ public void onFailure(Exception e) { if (localIndices != null) { ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); - SearchRequest ccsLocalSearchRequest = SearchRequest.crossClusterSearch(searchRequest, localIndices.indices(), + SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest(searchRequest, localIndices.indices(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false); localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 084a45267b5c5..3a1adf9748a06 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -330,7 +330,7 @@ private static AtomicArray generateFetchResults(int nShards, } private static SearchRequest randomSearchRequest() { - return randomBoolean() ? new SearchRequest() : SearchRequest.crossClusterSearch(new SearchRequest(), + return randomBoolean() ? 
new SearchRequest() : SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "remote", 0, randomBoolean()); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 8f1d89a37daaa..06231db26d67e 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -48,21 +48,21 @@ protected SearchRequest createSearchRequest() throws IOException { return request; } //clusterAlias and absoluteStartMillis do not have public getters/setters hence we randomize them only in this test specifically. - return SearchRequest.crossClusterSearch(request, request.indices(), + return SearchRequest.subSearchRequest(request, request.indices(), randomAlphaOfLengthBetween(5, 10), randomNonNegativeLong(), randomBoolean()); } public void testWithLocalReduction() { - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(null, Strings.EMPTY_ARRAY, "", 0, randomBoolean())); SearchRequest request = new SearchRequest(); - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, null, "", 0, randomBoolean())); - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, null, "", 0, randomBoolean())); + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, new String[]{null}, "", 0, randomBoolean())); - expectThrows(NullPointerException.class, () -> SearchRequest.crossClusterSearch(request, + expectThrows(NullPointerException.class, () -> SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, null, 0, randomBoolean())); - expectThrows(IllegalArgumentException.class, () -> SearchRequest.crossClusterSearch(request, + expectThrows(IllegalArgumentException.class, () -> SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, "", -1, randomBoolean())); - SearchRequest searchRequest = SearchRequest.crossClusterSearch(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean()); + SearchRequest searchRequest = SearchRequest.subSearchRequest(request, Strings.EMPTY_ARRAY, "", 0, randomBoolean()); assertNull(searchRequest.validate()); } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java index 82f7c513bf0ce..10f252c30dc3b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionSingleNodeTests.java @@ -46,7 +46,7 @@ public void testLocalClusterAlias() { assertEquals(RestStatus.CREATED, indexResponse.status()); { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), Strings.EMPTY_ARRAY, + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "local", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); @@ -58,7 +58,7 @@ public void testLocalClusterAlias() { assertEquals("1", 
hit.getId()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), Strings.EMPTY_ARRAY, + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", nowInMillis, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(1, searchResponse.getHits().getTotalHits().value); @@ -100,13 +100,13 @@ public void testAbsoluteStartMillis() { assertEquals(0, searchResponse.getTotalShards()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0, randomBoolean()); searchRequest.indices(""); SearchResponse searchResponse = client().search(searchRequest).actionGet(); @@ -114,7 +114,7 @@ public void testAbsoluteStartMillis() { assertEquals("test-1970.01.01", searchResponse.getHits().getHits()[0].getIndex()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(new SearchRequest(), + SearchRequest searchRequest = SearchRequest.subSearchRequest(new SearchRequest(), Strings.EMPTY_ARRAY, "", 0, randomBoolean()); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); @@ -156,7 +156,7 @@ public void testFinalReduce() { source.aggregation(terms); { - SearchRequest searchRequest = randomBoolean() ? originalRequest : SearchRequest.crossClusterSearch(originalRequest, + SearchRequest searchRequest = randomBoolean() ? 
originalRequest : SearchRequest.subSearchRequest(originalRequest, Strings.EMPTY_ARRAY, "remote", nowInMillis, true); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); @@ -165,7 +165,7 @@ public void testFinalReduce() { assertEquals(1, longTerms.getBuckets().size()); } { - SearchRequest searchRequest = SearchRequest.crossClusterSearch(originalRequest, + SearchRequest searchRequest = SearchRequest.subSearchRequest(originalRequest, Strings.EMPTY_ARRAY, "remote", nowInMillis, false); SearchResponse searchResponse = client().search(searchRequest).actionGet(); assertEquals(2, searchResponse.getHits().getTotalHits().value); From 5da6f5dfbfcce23ff5ea0b4131887792e24d37bd Mon Sep 17 00:00:00 2001 From: jimczi Date: Thu, 23 May 2019 12:18:11 +0200 Subject: [PATCH 217/321] upgrade Lucene Version for ES 7.3.0 after backport of #42214 --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e3381a3384c0e..7f939ca627a95 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -93,7 +93,7 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_3_0_ID = 7030099; - public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_3_0 = new Version(V_7_3_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final int V_8_0_0_ID = 8000099; public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version CURRENT = V_8_0_0; From cb402220d88127b35152f4567beec41b219b96d3 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 23 May 2019 12:29:39 +0200 Subject: [PATCH 218/321] Remove deprecated Repository methods (#42359) We deprecated `restoreShard` and `snapshotShard` in #42213. This change removes the deprecated methods and their usage and adds a note in the migration docs. --- .../migration/migrate_8_0/java.asciidoc | 8 ++++ .../index/shard/StoreRecovery.java | 2 +- .../repositories/Repository.java | 41 ------------------- .../snapshots/SnapshotShardsService.java | 3 +- 4 files changed, 11 insertions(+), 43 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/java.asciidoc b/docs/reference/migration/migrate_8_0/java.asciidoc index 523e5b463d8bc..21d281acff97f 100644 --- a/docs/reference/migration/migrate_8_0/java.asciidoc +++ b/docs/reference/migration/migrate_8_0/java.asciidoc @@ -25,3 +25,11 @@ while silently truncating them to one of the three allowed edit distances 0, 1 or 2. This leniency is now removed and the class will throw errors when trying to construct an instance with another value (e.g. floats like 1.3 used to get accepted but truncated to 1). You should use one of the allowed values. + + +[float] +==== Changes to Repository +Repository has no dependency on IndexShard anymore. The contract of restoreShard +and snapshotShard has been reduced to Store and MapperService in order to improve +testability.
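To make the reduced contract concrete, here is a minimal sketch of the updated call shape (illustrative; it mirrors the SnapshotShardsService hunk further below and assumes an `indexShard`, `repository`, `snapshot`, `indexId` and `snapshotStatus` already in scope):

    import org.apache.lucene.index.IndexCommit;
    import org.elasticsearch.index.engine.Engine;

    // The repository now receives only the Store and MapperService it needs,
    // instead of the whole IndexShard:
    try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) {
        IndexCommit commit = snapshotRef.getIndexCommit();
        repository.snapshotShard(indexShard.store(), indexShard.mapperService(),
            snapshot.getSnapshotId(), indexId, commit, snapshotStatus);
    }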
diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index aa49f7ecb60ce..fae3703027f9e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -469,7 +469,7 @@ private void restore(final IndexShard indexShard, final Repository repository, f } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); assert indexShard.getEngineOrNull() == null; - repository.restoreShard(indexShard, indexShard.store(), restoreSource.snapshot().getSnapshotId(), + repository.restoreShard(indexShard.store(), restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); store.bootstrapNewHistory(); diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 3aa19cb130cae..0eca92039fbf8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -189,27 +188,6 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long */ boolean isReadOnly(); - /** - * Creates a snapshot of the shard based on the index commit point. - *
<p>
- * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method. - * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. - *
<p>
- * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check - * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. - * @param indexShard the shard to be snapshotted - * @param snapshotId snapshot id - * @param indexId id for the index being snapshotted - * @param snapshotIndexCommit commit point - * @param snapshotStatus snapshot status - * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead - */ - @Deprecated - default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus); - } - /** * Creates a snapshot of the shard based on the index commit point. *
<p>
@@ -228,25 +206,6 @@ default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus); - /** - * Restores snapshot of the shard. - *
<p>
- * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. - * @param shard the shard to restore the index into - * @param store the store to restore the index into - * @param snapshotId snapshot id - * @param version version of elasticsearch that created this snapshot - * @param indexId id of the index in the repository from which the restore is occurring - * @param snapshotShardId shard id (in the snapshot) - * @param recoveryState recovery state - * @deprecated use {@link #restoreShard(Store, SnapshotId, Version, IndexId, ShardId, RecoveryState)} instead - */ - @Deprecated - default void restoreShard(IndexShard shard, Store store, SnapshotId snapshotId, Version version, IndexId indexId, - ShardId snapshotShardId, RecoveryState recoveryState) { - restoreShard(store, snapshotId, version, indexId, snapshotShardId, recoveryState); - } - /** * Restores snapshot of the shard. *
<p>
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index f79b6da6ef626..b21df093fadd2 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -367,7 +367,8 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina try { // we flush first to make sure we get the latest writes snapshotted try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { - repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); + repository.snapshotShard(indexShard.store(), indexShard.mapperService(), snapshot.getSnapshotId(), indexId, + snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); From 2721326d576ea8e2feaf278480580d2d83f29628 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 23 May 2019 05:15:40 -0700 Subject: [PATCH 219/321] Remove old assertion in resync replication request (#42390) This assertion was left behind from a previous cleanup. It guarded some stale logic that was no longer needed once the master would no longer talk to 6.x nodes. When that logic was removed, the assertion was left in place. This commit removes that stale assertion. --- .../elasticsearch/action/resync/ResyncReplicationRequest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index f19bfe3ac6952..78b87435a4f34 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -38,8 +38,6 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest Date: Thu, 23 May 2019 07:54:00 -0500 Subject: [PATCH 220/321] Bulk processor concurrent requests (#41451) `org.elasticsearch.action.bulk.BulkProcessor` is a thread-safe class that provides simple semantics for sending bulk requests. Once a bulk reaches its pre-defined size, document count, or flush interval, it will execute sending the bulk. One configurable option is the number of concurrent outstanding bulk requests. That concurrency is implemented in `org.elasticsearch.action.bulk.BulkRequestHandler` via a semaphore. However, the only code that currently calls into this code is blocked by `synchronized` methods. This prevents the BulkProcessor from behaving concurrently despite supporting a configurable number of concurrent requests. This change removes the `synchronized` methods in favor of an explicit lock around the non-thread-safe parts of each method. The call into `org.elasticsearch.action.bulk.BulkRequestHandler` is no longer blocking, which allows `org.elasticsearch.action.bulk.BulkRequestHandler` to handle its own concurrency.
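To make the new flow concrete, here is a minimal sketch of the pattern this change adopts (the names Batch, send, and the batch-size threshold are illustrative, not the actual BulkProcessor API): mutate the shared batch under an explicit ReentrantLock, swap in a fresh instance once it is full, and only send the full batch after releasing the lock, so the request handler's semaphore alone bounds concurrency:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantLock;

    // Sketch of "mutate under lock, send outside the lock".
    class LockThenSendSketch {
        private final ReentrantLock lock = new ReentrantLock();
        private Batch current = new Batch();

        void add(Object doc) {
            Batch full = null;
            lock.lock();
            try {
                current.add(doc);
                if (current.isFull()) {
                    full = current;        // take the full batch...
                    current = new Batch(); // ...and swap in a fresh one
                }
            } finally {
                lock.unlock();
            }
            // Outside the lock: may block on the handler's semaphore without
            // stalling writers that only need to append to the new batch.
            if (full != null) {
                send(full);
            }
        }

        private void send(Batch batch) { /* hand off to the request handler */ }

        private static final class Batch {
            private final List<Object> docs = new ArrayList<>();
            void add(Object doc) { docs.add(doc); }
            boolean isFull() { return docs.size() >= 1000; }
        }
    }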
--- .../action/bulk/BulkProcessor.java | 108 +++++--- .../action/bulk/BulkProcessorTests.java | 251 +++++++++++++++++- 2 files changed, 328 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index b0ad87a8b744a..08c42c5ea40de 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -26,6 +26,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -39,6 +40,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiConsumer; import java.util.function.Supplier; @@ -225,6 +227,7 @@ private static Scheduler buildScheduler(ScheduledThreadPoolExecutor scheduledThr private final Runnable onClose; private volatile boolean closed = false; + private final ReentrantLock lock = new ReentrantLock(); BulkProcessor(BiConsumer> consumer, BackoffPolicy backoffPolicy, Listener listener, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval, @@ -264,21 +267,26 @@ public void close() { * completed * @throws InterruptedException If the current thread is interrupted */ - public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { - if (closed) { - return true; - } - closed = true; + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + lock.lock(); + try { + if (closed) { + return true; + } + closed = true; - this.cancellableFlushTask.cancel(); + this.cancellableFlushTask.cancel(); - if (bulkRequest.numberOfActions() > 0) { - execute(); - } - try { - return this.bulkRequestHandler.awaitClose(timeout, unit); + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + try { + return this.bulkRequestHandler.awaitClose(timeout, unit); + } finally { + onClose.run(); + } } finally { - onClose.run(); + lock.unlock(); } } @@ -315,10 +323,22 @@ protected void ensureOpen() { } } - private synchronized void internalAdd(DocWriteRequest request) { - ensureOpen(); - bulkRequest.add(request); - executeIfNeeded(); + private void internalAdd(DocWriteRequest request) { + //bulkRequest and instance swapping are not threadsafe, so execute the mutations under a lock. + //once the bulk request is ready to be shipped, swap the instance reference, unlock, and send the local reference to the handler. + Tuple bulkRequestToExecute = null; + lock.lock(); + try { + ensureOpen(); + bulkRequest.add(request); + bulkRequestToExecute = newBulkRequestIfNeeded(); + } finally { + lock.unlock(); + } + //execute sending the local reference outside the lock to allow the handler to control the concurrency via its configuration.
+ if (bulkRequestToExecute != null) { + execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2()); + } } /** @@ -332,11 +352,23 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu /** * Adds the data from the bytes to be processed by the bulk processor */ - public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, + public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, XContentType xContentType) throws Exception { - bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, true, xContentType); - executeIfNeeded(); + Tuple bulkRequestToExecute = null; + lock.lock(); + try { + ensureOpen(); + bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, + true, xContentType); + bulkRequestToExecute = newBulkRequestIfNeeded(); + } finally { + lock.unlock(); + } + + if (bulkRequestToExecute != null) { + execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2()); + } return this; } @@ -358,23 +390,32 @@ public boolean isCancelled() { return scheduler.scheduleWithFixedDelay(flushRunnable, flushInterval, ThreadPool.Names.GENERIC); } - private void executeIfNeeded() { + // needs to be executed under a lock + private Tuple newBulkRequestIfNeeded(){ ensureOpen(); if (!isOverTheLimit()) { - return; + return null; } - execute(); + final BulkRequest bulkRequest = this.bulkRequest; + this.bulkRequest = bulkRequestSupplier.get(); + return new Tuple<>(bulkRequest,executionIdGen.incrementAndGet()) ; + } + + // may be executed without a lock + private void execute(BulkRequest bulkRequest, long executionId ){ + this.bulkRequestHandler.execute(bulkRequest, executionId); } - // (currently) needs to be executed under a lock + // needs to be executed under a lock private void execute() { final BulkRequest bulkRequest = this.bulkRequest; final long executionId = executionIdGen.incrementAndGet(); this.bulkRequest = bulkRequestSupplier.get(); - this.bulkRequestHandler.execute(bulkRequest, executionId); + execute(bulkRequest, executionId); } + // needs to be executed under a lock private boolean isOverTheLimit() { if (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) { return true; @@ -388,18 +429,23 @@ private boolean isOverTheLimit() { /** * Flush pending delete or index requests. 
*/ - public synchronized void flush() { - ensureOpen(); - if (bulkRequest.numberOfActions() > 0) { - execute(); + public void flush() { + lock.lock(); + try { + ensureOpen(); + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + } finally { + lock.unlock(); } } class Flush implements Runnable { - @Override public void run() { - synchronized (BulkProcessor.this) { + lock.lock(); + try { if (closed) { return; } @@ -407,6 +453,8 @@ public void run() { return; } execute(); + } finally { + lock.unlock(); } } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index e2527397a780a..6a58696534ed4 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -19,26 +19,43 @@ package org.elasticsearch.action.bulk; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; public class BulkProcessorTests extends ESTestCase { private ThreadPool threadPool; + private final Logger logger = LogManager.getLogger(BulkProcessorTests.class); @Before public void startThreadPool() { @@ -90,10 +107,216 @@ public void testBulkProcessorFlushPreservesContext() throws InterruptedException bulkProcessor.close(); } + public void testConcurrentExecutions() throws Exception { + final AtomicBoolean called = new AtomicBoolean(false); + final AtomicReference exceptionRef = new AtomicReference<>(); + int estimatedTimeForTest = Integer.MAX_VALUE; + final int simulateWorkTimeInMillis = 5; + int concurrentClients = 0; + int concurrentBulkRequests = 0; + int expectedExecutions = 0; + int maxBatchSize = 0; + int maxDocuments = 0; + int iterations = 0; + boolean runTest = true; + //find some randoms that allow this test to take under ~ 10 seconds + while (estimatedTimeForTest > 10_000) { + if (iterations++ > 1_000) { //extremely unlikely + runTest = false; + break; + } + maxBatchSize = randomIntBetween(1, 100); + maxDocuments = randomIntBetween(maxBatchSize, 1_000_000); + concurrentClients = randomIntBetween(1, 20); + concurrentBulkRequests = randomIntBetween(0, 20); + expectedExecutions = maxDocuments / 
maxBatchSize; + estimatedTimeForTest = (expectedExecutions * simulateWorkTimeInMillis) / + Math.min(concurrentBulkRequests + 1, concurrentClients); + } + assumeTrue("failed to find random values that allows test to run quickly", runTest); + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse() }, 0); + AtomicInteger failureCount = new AtomicInteger(0); + AtomicInteger successCount = new AtomicInteger(0); + AtomicInteger requestCount = new AtomicInteger(0); + AtomicInteger docCount = new AtomicInteger(0); + BiConsumer> consumer = (request, listener) -> + { + try { + Thread.sleep(simulateWorkTimeInMillis); //simulate work + listener.onResponse(bulkResponse); + } catch (InterruptedException e) { + //should never happen + Thread.currentThread().interrupt(); + failureCount.getAndIncrement(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + }; + try (BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), + countingListener(requestCount, successCount, failureCount, docCount, exceptionRef), + concurrentBulkRequests, maxBatchSize, new ByteSizeValue(Integer.MAX_VALUE), null, + (command, delay, executor) -> null, () -> called.set(true), BulkRequest::new)) { + + ExecutorService executorService = Executors.newFixedThreadPool(concurrentClients); + CountDownLatch startGate = new CountDownLatch(1 + concurrentClients); + + IndexRequest indexRequest = new IndexRequest(); + String bulkRequest = "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n" + "{ \"field1\" : \"value1\" }\n"; + BytesReference bytesReference = + BytesReference.fromByteBuffers(new ByteBuffer[]{ ByteBuffer.wrap(bulkRequest.getBytes(StandardCharsets.UTF_8)) }); + List futures = new ArrayList<>(); + for (final AtomicInteger i = new AtomicInteger(0); i.getAndIncrement() < maxDocuments; ) { + futures.add(executorService.submit(() -> { + try { + //don't start any work until all tasks are submitted + startGate.countDown(); + startGate.await(); + //alternate between ways to add to the bulk processor + if (randomBoolean()) { + bulkProcessor.add(indexRequest); + } else { + bulkProcessor.add(bytesReference, null, null, XContentType.JSON); + } + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + })); + } + startGate.countDown(); + startGate.await(); + + for (Future f : futures) { + try { + f.get(); + } catch (Exception e) { + failureCount.incrementAndGet(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + } + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + + if (failureCount.get() > 0 || successCount.get() != expectedExecutions || requestCount.get() != successCount.get()) { + if (exceptionRef.get() != null) { + logger.error("exception(s) caught during test", exceptionRef.get()); + } + fail("\nExpected Bulks: " + expectedExecutions + "\n" + + "Requested Bulks: " + requestCount.get() + "\n" + + "Successful Bulks: " + successCount.get() + "\n" + + "Failed Bulks: " + failureCount.get() + "\n" + + "Max Documents: " + maxDocuments + "\n" + + "Max Batch Size: " + maxBatchSize + "\n" + + "Concurrent Clients: " + concurrentClients + "\n" + + "Concurrent Bulk Requests: " + concurrentBulkRequests + "\n" + ); + } + } + //count total docs after processor is closed since there may have been partial batches that are flushed on close. 
+ assertEquals(docCount.get(), maxDocuments); + } + + public void testConcurrentExecutionsWithFlush() throws Exception { + final AtomicReference exceptionRef = new AtomicReference<>(); + final int maxDocuments = 100_000; + final int concurrentClients = 2; + final int maxBatchSize = Integer.MAX_VALUE; //don't flush based on size + final int concurrentBulkRequests = randomIntBetween(0, 20); + final int simulateWorkTimeInMillis = 5; + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse() }, 0); + AtomicInteger failureCount = new AtomicInteger(0); + AtomicInteger successCount = new AtomicInteger(0); + AtomicInteger requestCount = new AtomicInteger(0); + AtomicInteger docCount = new AtomicInteger(0); + BiConsumer> consumer = (request, listener) -> + { + try { + Thread.sleep(simulateWorkTimeInMillis); //simulate work + listener.onResponse(bulkResponse); + } catch (InterruptedException e) { + //should never happen + Thread.currentThread().interrupt(); + failureCount.getAndIncrement(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + }; + ScheduledExecutorService flushExecutor = Executors.newScheduledThreadPool(1); + try (BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), + countingListener(requestCount, successCount, failureCount, docCount, exceptionRef), + concurrentBulkRequests, maxBatchSize, new ByteSizeValue(Integer.MAX_VALUE), + TimeValue.timeValueMillis(simulateWorkTimeInMillis * 2), + (command, delay, executor) -> + Scheduler.wrapAsScheduledCancellable(flushExecutor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS)), + () -> + { + flushExecutor.shutdown(); + try { + flushExecutor.awaitTermination(10L, TimeUnit.SECONDS); + if (flushExecutor.isTerminated() == false) { + flushExecutor.shutdownNow(); + } + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + }, + BulkRequest::new)) { + + ExecutorService executorService = Executors.newFixedThreadPool(concurrentClients); + IndexRequest indexRequest = new IndexRequest(); + String bulkRequest = "{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n" + "{ \"field1\" : \"value1\" }\n"; + BytesReference bytesReference = + BytesReference.fromByteBuffers(new ByteBuffer[]{ ByteBuffer.wrap(bulkRequest.getBytes(StandardCharsets.UTF_8)) }); + List futures = new ArrayList<>(); + CountDownLatch startGate = new CountDownLatch(1 + concurrentClients); + for (final AtomicInteger i = new AtomicInteger(0); i.getAndIncrement() < maxDocuments; ) { + futures.add(executorService.submit(() -> { + try { + //don't start any work until all tasks are submitted + startGate.countDown(); + startGate.await(); + //alternate between ways to add to the bulk processor + if (randomBoolean()) { + bulkProcessor.add(indexRequest); + } else { + bulkProcessor.add(bytesReference, null, null, XContentType.JSON); + } + } catch (Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + })); + } + startGate.countDown(); + startGate.await(); + + for (Future f : futures) { + try { + f.get(); + } catch (Exception e) { + failureCount.incrementAndGet(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), e)); + } + } + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + } + + if (failureCount.get() > 0 || requestCount.get() != successCount.get() || maxDocuments != docCount.get()) { + if (exceptionRef.get() != null) { + logger.error("exception(s) caught during test", exceptionRef.get()); + } + 
fail("\nRequested Bulks: " + requestCount.get() + "\n" + + "Successful Bulks: " + successCount.get() + "\n" + + "Failed Bulks: " + failureCount.get() + "\n" + + "Total Documents: " + docCount.get() + "\n" + + "Max Documents: " + maxDocuments + "\n" + + "Max Batch Size: " + maxBatchSize + "\n" + + "Concurrent Clients: " + concurrentClients + "\n" + + "Concurrent Bulk Requests: " + concurrentBulkRequests + "\n" + ); + } + } public void testAwaitOnCloseCallsOnClose() throws Exception { final AtomicBoolean called = new AtomicBoolean(false); - BiConsumer> consumer = (request, listener) -> {}; + BiConsumer> consumer = (request, listener) -> { }; BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(), 0, 10, new ByteSizeValue(1000), null, (command, delay, executor) -> null, () -> called.set(true), BulkRequest::new); @@ -118,4 +341,30 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } }; } + + private BulkProcessor.Listener countingListener(AtomicInteger requestCount, AtomicInteger successCount, AtomicInteger failureCount, + AtomicInteger docCount, AtomicReference exceptionRef) { + + return new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + requestCount.incrementAndGet(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + successCount.incrementAndGet(); + docCount.addAndGet(request.requests().size()); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + if (failure != null) { + failureCount.incrementAndGet(); + exceptionRef.set(ExceptionsHelper.useOrSuppress(exceptionRef.get(), failure)); + + } + } + }; + } } From cbb3bbdd78002a827011003c7ed14446eb3f4148 Mon Sep 17 00:00:00 2001 From: Christoph Büscher Date: Thu, 23 May 2019 09:53:16 -0400 Subject: [PATCH 221/321] Prevent normalizer from not being closed on exception (#42375) Currently AnalysisRegistry#processNormalizerFactory creates a normalizer and only later checks whether it should be added to the normalizer map passed in. If that later check throws an exception, the normalizer is never closed. This can be prevented by moving the check that throws the exception earlier, before the normalizer is created.
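The fix follows a general resource-safety pattern worth calling out: run every check that can throw before creating the closeable resource, so no failure path can leak it. A minimal sketch under assumed names (Registry and ResourceFactory are hypothetical, not the AnalysisRegistry API):

    import java.io.Closeable;
    import java.util.Map;

    // Check-then-create: if validation throws, nothing has been allocated
    // yet, so there is no resource left unclosed.
    class RegistrySketch {
        interface ResourceFactory {
            Closeable create();
        }

        static void register(Map<String, Closeable> entries, String name, ResourceFactory factory) {
            if (entries.containsKey(name)) {
                // Throwing here is safe: the resource does not exist yet.
                throw new IllegalStateException("already registered: " + name);
            }
            entries.put(name, factory.create()); // create only once registration is certain
        }
    }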
--- .../org/elasticsearch/index/analysis/AnalysisRegistry.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index c4be6edd49069..d9c4b2c510bc9 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -523,14 +523,14 @@ private void processNormalizerFactory( if (normalizerFactory instanceof CustomNormalizerProvider) { ((CustomNormalizerProvider) normalizerFactory).build(tokenizerName, tokenizerFactory, charFilters, tokenFilters); } + if (normalizers.containsKey(name)) { + throw new IllegalStateException("already registered analyzer with name: " + name); + } Analyzer normalizerF = normalizerFactory.get(); if (normalizerF == null) { throw new IllegalArgumentException("normalizer [" + normalizerFactory.name() + "] created null normalizer"); } NamedAnalyzer normalizer = new NamedAnalyzer(name, normalizerFactory.scope(), normalizerF); - if (normalizers.containsKey(name)) { - throw new IllegalStateException("already registered analyzer with name: " + name); - } normalizers.put(name, normalizer); } } From c459ea828f6419fba0469cc1569c5ead741e7dee Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 23 May 2019 16:02:12 +0200 Subject: [PATCH 222/321] Remove node.max_local_storage_nodes (#42428) This setting, which prior to Elasticsearch 5 was enabled by default and caused all kinds of confusion, has since been disabled by default and is not recommended for production use. The preferred way going forward is for users to explicitly specify separate data folders for each started node to ensure that each node is consistently assigned to the same data path. Relates to #42426 --- docs/reference/commands/node-tool.asciidoc | 6 +- docs/reference/migration/migrate_8_0.asciidoc | 2 + .../migration/migrate_8_0/node.asciidoc | 16 +++ docs/reference/modules/node.asciidoc | 15 --- .../env/NodeEnvironmentEvilTests.java | 4 +- .../ElasticsearchNodeCommand.java | 14 +-- .../common/settings/ClusterSettings.java | 1 - .../elasticsearch/env/NodeEnvironment.java | 97 +++++-------------- .../RemoveCorruptedShardDataCommand.java | 86 +++++++--------- .../elasticsearch/index/shard/ShardPath.java | 7 +- .../UnsafeBootstrapAndDetachCommandIT.java | 12 +-- .../env/NodeEnvironmentTests.java | 37 +------ .../RemoveCorruptedShardDataCommandTests.java | 2 +- 13 files changed, 99 insertions(+), 200 deletions(-) create mode 100644 docs/reference/migration/migrate_8_0/node.asciidoc diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index ed810a4dac014..4dd2b0dfe0b6a 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -13,7 +13,7 @@ with the data on disk. [source,shell] -------------------------------------------------- bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version - [--ordinal ] [-E ] + [-E ] [-h, --help] ([-s, --silent] | [-v, --verbose]) -------------------------------------------------- @@ -290,10 +290,6 @@ it can join a different cluster. `override-version`:: Overwrites the version number stored in the data path so that a node can start despite being incompatible with the on-disk data. -`--ordinal `:: If there is <> then this specifies which node to target. 
Defaults -to `0`, meaning to use the first node in the data path. - `-E `:: Configures a setting. `-h, --help`:: Returns all of the command parameters. diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index ed40dddaae28e..84672da61635c 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -20,6 +20,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> * <> * <> @@ -54,6 +55,7 @@ include::migrate_8_0/security.asciidoc[] include::migrate_8_0/ilm.asciidoc[] include::migrate_8_0/java.asciidoc[] include::migrate_8_0/network.asciidoc[] +include::migrate_8_0/node.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc new file mode 100644 index 0000000000000..a1dcd654807e1 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/node.asciidoc @@ -0,0 +1,16 @@ +[float] +[[breaking_80_node_changes]] +=== Node changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== Removal of `node.max_local_storage_nodes` setting + +The `node.max_local_storage_nodes` setting was deprecated in 7.x and +has been removed in 8.0. Nodes should be run on separate data paths +to ensure that each node is consistently assigned to the same data path. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index f988e97ef553c..031138dada3f1 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -277,21 +277,6 @@ home directory, so that the home directory can be deleted without deleting your data! The RPM and Debian distributions do this for you already. -[float] -[[max-local-storage-nodes]] -=== `node.max_local_storage_nodes` - -The <> can be shared by multiple nodes, even by nodes from different -clusters. This is very useful for testing failover and different configurations on your development -machine. In production, however, it is recommended to run only one node of Elasticsearch per server. - -By default, Elasticsearch is configured to prevent more than one node from sharing the same data -path. To allow for more than one node (e.g., on your development machine), use the setting -`node.max_local_storage_nodes` and set this to a positive integer larger than one. - -WARNING: Never run different node types (i.e. master, data) from the same data directory. This can -lead to unexpected data loss. 
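With the setting gone, the supported way to run more than one node on a single machine is to give every node its own data directory. A minimal sketch (the paths here are hypothetical) is to start each node with a distinct `path.data`:

[source,shell]
--------------------------------------------------
bin/elasticsearch -E path.data=/var/lib/elasticsearch/node-a
bin/elasticsearch -E path.data=/var/lib/elasticsearch/node-b
--------------------------------------------------

Each invocation then locks and reuses its own `nodes/0` directory, which is exactly the per-node consistency this change is after.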
- [float] == Other node settings diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java index 57d4a363cc8c7..44d3c2a88a55b 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java @@ -51,10 +51,10 @@ public void testMissingWritePermission() throws IOException { Settings build = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); - IOException ioException = expectThrows(IOException.class, () -> { + IOException exception = expectThrows(IOException.class, () -> { new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); - assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString())); + assertTrue(exception.getMessage(), exception.getMessage().startsWith(path.toString())); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index ec664c97067d1..a65934c767769 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -20,7 +20,6 @@ import joptsimple.OptionParser; import joptsimple.OptionSet; -import joptsimple.OptionSpec; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.LockObtainFailedException; @@ -59,22 +58,15 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; protected static final String ABORTED_BY_USER_MSG = "aborted by user"; - final OptionSpec nodeOrdinalOption; public ElasticsearchNodeCommand(String description) { super(description); - nodeOrdinalOption = parser.accepts("ordinal", "Optional node ordinal, 0 if not specified") - .withRequiredArg().ofType(Integer.class); namedXContentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); } - protected void processNodePathsWithLock(Terminal terminal, OptionSet options, Environment env) throws IOException { + protected void processNodePaths(Terminal terminal, OptionSet options, Environment env) throws IOException { terminal.println(Terminal.Verbosity.VERBOSE, "Obtaining lock for node"); - Integer nodeOrdinal = nodeOrdinalOption.value(options); - if (nodeOrdinal == null) { - nodeOrdinal = 0; - } - try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(nodeOrdinal, logger, env, Files::exists)) { + try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, env, Files::exists)) { final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new); if (dataPaths.length == 0) { @@ -118,7 +110,7 @@ protected void confirm(Terminal terminal, String msg) { protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { terminal.println(STOP_WARNING_MSG); if (validateBeforeLock(terminal, env)) { - 
processNodePathsWithLock(terminal, options, env); + processNodePaths(terminal, options, env); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 6b50c0f1c112c..e29ceb7372bcf 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -390,7 +390,6 @@ public void apply(Settings value, Settings current, Settings previous) { ThreadContext.DEFAULT_HEADERS_SETTING, Loggers.LOG_DEFAULT_LEVEL_SETTING, Loggers.LOG_LEVEL_SETTING, - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, OsService.REFRESH_INTERVAL_SETTING, ProcessService.REFRESH_INTERVAL_SETTING, diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 4cfd22ecb1a65..497c6a9e06459 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -81,7 +81,6 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -91,9 +90,9 @@ */ public final class NodeEnvironment implements Closeable { public static class NodePath { - /* ${data.paths}/nodes/{node.id} */ + /* ${data.paths}/nodes/0 */ public final Path path; - /* ${data.paths}/nodes/{node.id}/indices */ + /* ${data.paths}/nodes/0/indices */ public final Path indicesPath; /** Cached FileStore from path */ public final FileStore fileStore; @@ -152,18 +151,11 @@ public String toString() { private final Path sharedDataPath; private final Lock[] locks; - private final int nodeLockId; private final AtomicBoolean closed = new AtomicBoolean(false); private final Map shardLocks = new HashMap<>(); private final NodeMetaData nodeMetaData; - /** - * Maximum number of data nodes that should run in an environment. - */ - public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1, - Property.NodeScope); - /** * Seed for determining a persisted unique uuid of this node. If the node has already a persisted uuid on disk, * this seed will be ignored and the uuid from disk will be reused. 
@@ -184,7 +176,6 @@ public String toString() { public static class NodeLock implements Releasable { - private final int nodeId; private final Lock[] locks; private final NodePath[] nodePaths; @@ -192,17 +183,16 @@ public static class NodeLock implements Releasable { * Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it * @param pathFunction function to check node path before attempt of acquiring a node lock */ - public NodeLock(final int nodeId, final Logger logger, + public NodeLock(final Logger logger, final Environment environment, final CheckedFunction pathFunction) throws IOException { - this.nodeId = nodeId; nodePaths = new NodePath[environment.dataFiles().length]; locks = new Lock[nodePaths.length]; try { final Path[] dataPaths = environment.dataFiles(); for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) { Path dataDir = dataPaths[dirIndex]; - Path dir = resolveNodePath(dataDir, nodeId); + Path dir = resolveNodePath(dataDir); if (pathFunction.apply(dir) == false) { continue; } @@ -248,61 +238,35 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce nodePaths = null; sharedDataPath = null; locks = null; - nodeLockId = -1; nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); return; } boolean success = false; - NodeLock nodeLock = null; try { sharedDataPath = environment.sharedDataFile(); - IOException lastException = null; - int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings); - final AtomicReference onCreateDirectoriesException = new AtomicReference<>(); - for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) { - try { - nodeLock = new NodeLock(possibleLockId, logger, environment, - dir -> { - try { - Files.createDirectories(dir); - } catch (IOException e) { - onCreateDirectoriesException.set(e); - throw e; - } - return true; - }); - break; - } catch (LockObtainFailedException e) { - // ignore any LockObtainFailedException - } catch (IOException e) { - if (onCreateDirectoriesException.get() != null) { - throw onCreateDirectoriesException.get(); - } - lastException = e; - } + for (Path path : environment.dataFiles()) { + Files.createDirectories(resolveNodePath(path)); } - if (nodeLock == null) { + final NodeLock nodeLock; + try { + nodeLock = new NodeLock(logger, environment, dir -> true); + } catch (IOException e) { final String message = String.format( Locale.ROOT, - "failed to obtain node locks, tried [%s] with lock id%s;" + - " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?", - Arrays.toString(environment.dataFiles()), - maxLocalStorageNodes == 1 ? 
" [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]", - MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), - maxLocalStorageNodes); - throw new IllegalStateException(message, lastException); + "failed to obtain node locks, tried %s;" + + " maybe these locations are not writable or multiple nodes were started on the same data path?", + Arrays.toString(environment.dataFiles())); + throw new IllegalStateException(message, e); } + this.locks = nodeLock.locks; this.nodePaths = nodeLock.nodePaths; - this.nodeLockId = nodeLock.nodeId; this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); - if (logger.isDebugEnabled()) { - logger.debug("using node location [{}], local_lock_id [{}]", nodePaths, nodeLockId); - } + logger.debug("using node location {}", Arrays.toString(nodePaths)); maybeLogPathDetails(); maybeLogHeapDetails(); @@ -334,11 +298,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce * Resolve a specific nodes/{node.id} path for the specified path and node lock id. * * @param path the path - * @param nodeLockId the node lock id * @return the resolved path */ - public static Path resolveNodePath(final Path path, final int nodeLockId) { - return path.resolve(NODES_FOLDER).resolve(Integer.toString(nodeLockId)); + public static Path resolveNodePath(final Path path) { + return path.resolve(NODES_FOLDER).resolve("0"); } private void maybeLogPathDetails() throws IOException { @@ -805,14 +768,6 @@ public NodePath[] nodePaths() { return nodePaths; } - public int getNodeLockId() { - assertEnvIsLocked(); - if (nodePaths == null || locks == null) { - throw new IllegalStateException("node is not configured to store local location"); - } - return nodeLockId; - } - /** * Returns all index paths. */ @@ -1137,12 +1092,12 @@ private static boolean isIndexMetaDataPath(Path path) { * * @param indexSettings settings for the index */ - public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path sharedDataPath, int nodeLockId) { + public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path sharedDataPath) { String customDataDir = indexSettings.customDataPath(); if (customDataDir != null) { // This assert is because this should be caught by MetaDataCreateIndexService assert sharedDataPath != null; - return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(nodeLockId)); + return sharedDataPath.resolve(customDataDir).resolve("0"); } else { throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); } @@ -1156,11 +1111,11 @@ public static Path resolveBaseCustomLocation(IndexSettings indexSettings, Path s * @param indexSettings settings for the index */ private Path resolveIndexCustomLocation(IndexSettings indexSettings) { - return resolveIndexCustomLocation(indexSettings, sharedDataPath, nodeLockId); + return resolveIndexCustomLocation(indexSettings, sharedDataPath); } - private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path sharedDataPath, int nodeLockId) { - return resolveBaseCustomLocation(indexSettings, sharedDataPath, nodeLockId).resolve(indexSettings.getUUID()); + private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path sharedDataPath) { + return resolveBaseCustomLocation(indexSettings, sharedDataPath).resolve(indexSettings.getUUID()); } /** @@ -1172,11 +1127,11 @@ private static Path resolveIndexCustomLocation(IndexSettings indexSettings, Path * @param shardId shard to resolve the path to */ public Path 
resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) { - return resolveCustomLocation(indexSettings, shardId, sharedDataPath, nodeLockId); + return resolveCustomLocation(indexSettings, shardId, sharedDataPath); } - public static Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId, Path sharedDataPath, int nodeLockId) { - return resolveIndexCustomLocation(indexSettings, sharedDataPath, nodeLockId).resolve(Integer.toString(shardId.id())); + public static Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId, Path sharedDataPath) { + return resolveIndexCustomLocation(indexSettings, sharedDataPath).resolve(Integer.toString(shardId.id())); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 7242198633be2..16db596515b4c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -126,8 +126,6 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen final String indexName; final int shardId; - final int fromNodeId; - final int toNodeId; if (options.has(folderOption)) { final Path path = getPath(folderOption.value(options)).getParent(); @@ -150,8 +148,6 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen ) { shardId = Integer.parseInt(shardIdFileName); indexName = indexMetaData.getIndex().getName(); - fromNodeId = Integer.parseInt(nodeIdFileName); - toNodeId = fromNodeId + 1; } else { throw new ElasticsearchException("Unable to resolve shard id. 
Wrong folder structure at [ " + path.toString() + " ], expected .../nodes/[NODE-ID]/indices/[INDEX-UUID]/[SHARD-ID]"); @@ -160,59 +156,49 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen // otherwise resolve shardPath based on the index name and shard id indexName = Objects.requireNonNull(indexNameOption.value(options), "Index name is required"); shardId = Objects.requireNonNull(shardIdOption.value(options), "Shard ID is required"); - - // resolve shard path in case of multi-node layout per environment - fromNodeId = 0; - toNodeId = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(settings); } - // have to iterate over possibleLockId as NodeEnvironment; on a contrast to it - we have to fail if node is busy - for (int possibleLockId = fromNodeId; possibleLockId < toNodeId; possibleLockId++) { - try { - try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(possibleLockId, logger, environment, Files::exists)) { - final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths(); - for (NodeEnvironment.NodePath nodePath : nodePaths) { - if (Files.exists(nodePath.indicesPath)) { - // have to scan all index uuid folders to resolve from index name - try (DirectoryStream stream = Files.newDirectoryStream(nodePath.indicesPath)) { - for (Path file : stream) { - if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) { - continue; - } - - final IndexMetaData indexMetaData = - IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file); - if (indexMetaData == null) { - continue; - } - final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); - final Index index = indexMetaData.getIndex(); - if (indexName.equals(index.getName()) == false) { - continue; - } - final ShardId shId = new ShardId(index, shardId); - - final Path shardPathLocation = nodePath.resolve(shId); - if (Files.exists(shardPathLocation) == false) { - continue; - } - final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings, - new Path[]{shardPathLocation}, possibleLockId, nodePath.path); - if (shardPath != null) { - consumer.accept(shardPath); - return; - } - } + try (NodeEnvironment.NodeLock nodeLock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) { + final NodeEnvironment.NodePath[] nodePaths = nodeLock.getNodePaths(); + for (NodeEnvironment.NodePath nodePath : nodePaths) { + if (Files.exists(nodePath.indicesPath)) { + // have to scan all index uuid folders to resolve from index name + try (DirectoryStream stream = Files.newDirectoryStream(nodePath.indicesPath)) { + for (Path file : stream) { + if (Files.exists(file.resolve(MetaDataStateFormat.STATE_DIR_NAME)) == false) { + continue; + } + + final IndexMetaData indexMetaData = + IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, file); + if (indexMetaData == null) { + continue; + } + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + final Index index = indexMetaData.getIndex(); + if (indexName.equals(index.getName()) == false) { + continue; + } + final ShardId shId = new ShardId(index, shardId); + + final Path shardPathLocation = nodePath.resolve(shId); + if (Files.exists(shardPathLocation) == false) { + continue; + } + final ShardPath shardPath = ShardPath.loadShardPath(logger, shId, indexSettings, + new Path[]{shardPathLocation}, nodePath.path); + if (shardPath != null) { + consumer.accept(shardPath); + return; } } } } - } catch (LockObtainFailedException lofe) { - throw new 
ElasticsearchException("Failed to lock node's directory [" + lofe.getMessage() - + "], is Elasticsearch still running ?"); } + } catch (LockObtainFailedException lofe) { + throw new ElasticsearchException("Failed to lock node's directory [" + lofe.getMessage() + + "], is Elasticsearch still running ?"); } - throw new ElasticsearchException("Unable to resolve shard path for index [" + indexName + "] and shard id [" + shardId + "]"); } public static boolean isCorruptMarkerFileIsPresent(final Directory directory) throws IOException { @@ -238,7 +224,6 @@ protected void dropCorruptMarkerFiles(Terminal terminal, Path path, Directory di terminal); } String[] files = directory.listAll(); - boolean found = false; for (String file : files) { if (file.startsWith(Store.CORRUPTED)) { directory.deleteFile(file); @@ -282,7 +267,6 @@ public void execute(Terminal terminal, OptionSet options, Environment environmen findAndProcessShardPath(options, environment, shardPath -> { final Path indexPath = shardPath.resolveIndex(); final Path translogPath = shardPath.resolveTranslog(); - final Path nodePath = getNodePath(shardPath); if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) { throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory"); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java index d93cd988c62a7..32d38d9803414 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -118,9 +118,8 @@ public boolean isCustomDataPath() { public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException { final Path[] paths = env.availableShardPaths(shardId); - final int nodeLockId = env.getNodeLockId(); final Path sharedDataPath = env.sharedDataPath(); - return loadShardPath(logger, shardId, indexSettings, paths, nodeLockId, sharedDataPath); + return loadShardPath(logger, shardId, indexSettings, paths, sharedDataPath); } /** @@ -129,7 +128,7 @@ public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, * Note: this method resolves custom data locations for the shard. 
*/ public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSettings indexSettings, Path[] availableShardPaths, - int nodeLockId, Path sharedDataPath) throws IOException { + Path sharedDataPath) throws IOException { final String indexUUID = indexSettings.getUUID(); Path loadedPath = null; for (Path path : availableShardPaths) { @@ -157,7 +156,7 @@ public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSetti final Path dataPath; final Path statePath = loadedPath; if (indexSettings.hasCustomDataPath()) { - dataPath = NodeEnvironment.resolveCustomLocation(indexSettings, shardId, sharedDataPath, nodeLockId); + dataPath = NodeEnvironment.resolveCustomLocation(indexSettings, shardId, sharedDataPath); } else { dataPath = statePath; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 3bbf8378483dd..44f4d7bf4aa53 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -56,10 +56,10 @@ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase { - private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, int nodeOrdinal, boolean abort) + private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, boolean abort) throws Exception { final MockTerminal terminal = new MockTerminal(); - final OptionSet options = command.getParser().parse("-ordinal", Integer.toString(nodeOrdinal)); + final OptionSet options = command.getParser().parse(); final String input; if (abort) { @@ -80,14 +80,14 @@ private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environmen } private MockTerminal unsafeBootstrap(Environment environment, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort); + final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, abort); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG)); return terminal; } private MockTerminal detachCluster(Environment environment, boolean abort) throws Exception { - final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, 0, abort); + final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, abort); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG)); assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG)); return terminal; @@ -490,7 +490,7 @@ public void testCleanupOldMetaDataFails() throws Exception { protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newGeneration) { throw new SimulatedDeleteFailureException(); } - }, environment, 0, false); + }, environment, false); // check original meta-data left untouched. 
@@ -503,7 +503,7 @@ protected void cleanUpOldMetaData(Terminal terminal, Path[] dataPaths, long newG assertNotEquals(originalMetaData.clusterUUID(), secondMetaData.clusterUUID()); // check that a new run will cleanup. - executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, false); + executeCommand(new UnsafeBootstrapMasterCommand(), environment, false); assertNull(loadMetaData(dataPaths, namedXContentRegistry, originalManifest)); assertNull(loadMetaData(dataPaths, namedXContentRegistry, secondManifest)); diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 89a10af1a6fc2..f21b55b9aee8f 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -59,24 +59,8 @@ public class NodeEnvironmentTests extends ESTestCase { private final IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("foo", Settings.EMPTY); - public void testNodeLockSillySettings() { - try { - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), between(Integer.MIN_VALUE, 0)).build()); - fail("expected failure"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("must be >= 1")); - } - - // Even though its silly MAXINT nodes is a-ok! - int value = between(1, Integer.MAX_VALUE); - int max = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get( - Settings.builder().put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), value).build()); - assertEquals(value, max); - } - - public void testNodeLockSingleEnvironment() throws IOException { - final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 1).build()); + public void testNodeLock() throws IOException { + final Settings settings = buildEnvSettings(Settings.EMPTY); NodeEnvironment env = newNodeEnvironment(settings); List dataPaths = Environment.PATH_DATA_SETTING.get(settings); @@ -118,19 +102,6 @@ public void testSegmentInfosTracing() { } } - public void testNodeLockMultipleEnvironment() throws IOException { - final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build()); - final NodeEnvironment first = newNodeEnvironment(settings); - List dataPaths = Environment.PATH_DATA_SETTING.get(settings); - NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); - assertEquals(first.nodeDataPaths().length, dataPaths.size()); - assertEquals(second.nodeDataPaths().length, dataPaths.size()); - for (int i = 0; i < dataPaths.size(); i++) { - assertEquals(first.nodeDataPaths()[i].getParent(), second.nodeDataPaths()[i].getParent()); - } - IOUtils.close(first, second); - } - public void testShardLock() throws Exception { final NodeEnvironment env = newNodeEnvironment(); @@ -447,7 +418,7 @@ public void testExistingTempFiles() throws IOException { String[] paths = tmpPaths(); // simulate some previous left over temp files for (String path : randomSubsetOf(randomIntBetween(1, paths.length), paths)) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path), 0); + final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); Files.createDirectories(nodePath); Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME)); if (randomBoolean()) { @@ -462,7 +433,7 @@ public void 
testExistingTempFiles() throws IOException { // check we clean up for (String path: paths) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path), 0); + final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); final Path tempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME); assertFalse(tempFile + " should have been cleaned", Files.exists(tempFile)); final Path srcTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src"); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 2079b80cd386c..c9a7b236d9c8f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -91,7 +91,7 @@ public void setup() throws IOException { .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString()).build()); // create same directory structure as prod does - final Path path = NodeEnvironment.resolveNodePath(dataDir, 0); + final Path path = NodeEnvironment.resolveNodePath(dataDir); Files.createDirectories(path); settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) From 13dc1cf6b1983299da34cc22e3fe0e62168a5580 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Thu, 23 May 2019 16:02:46 +0200 Subject: [PATCH 223/321] Update max_concurrent_shard_request parameter docs (#42227) Some of the docs were outdated as they did not mention that the limit is per node. Also, the default value changed. Relates to #31206 --- docs/reference/search.asciidoc | 7 +++---- docs/reference/search/multi-search.asciidoc | 19 ++++++++++--------- .../resources/rest-api-spec/api/msearch.json | 4 ++-- .../resources/rest-api-spec/api/search.json | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index dd7faca60aa92..e99fb6f388d02 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -154,12 +154,11 @@ configure a soft limit, you can update the `action.search.shard_count.limit` cluster setting in order to reject search requests that hit too many shards. The request parameter `max_concurrent_shard_requests` can be used to control the -maximum number of concurrent shard requests the search API will execute for the -request. This parameter should be used to protect a single request from +maximum number of concurrent shard requests the search API will execute per node +for the request. This parameter should be used to protect a single request from overloading a cluster (e.g., a default request will hit all indices in a cluster which could cause shard request rejections if the number of shards per node is -high). This default is based on the number of data nodes in the cluster but at -most `256`. +high). This default value is `5`. -- diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 34dc37d794cad..87a87c922b37c 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -85,15 +85,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control the maximum number of concurrent searches the multi search api will execute.
This default is based on the number of data nodes and the default search thread pool size. -The request parameter `max_concurrent_shard_requests` can be used to control the -maximum number of concurrent shard requests the each sub search request will execute. -This parameter should be used to protect a single request from overloading a cluster -(e.g., a default request will hit all indices in a cluster which could cause shard request rejections -if the number of shards per node is high). This default is based on the number of -data nodes in the cluster but at most `256`.In certain scenarios parallelism isn't achieved through -concurrent request such that this protection will result in poor performance. For -instance in an environment where only a very low number of concurrent search requests are expected -it might help to increase this value to a higher number. +The request parameter `max_concurrent_shard_requests` can be used to control +the maximum number of concurrent shard requests that each sub search request +will execute per node. This parameter should be used to protect a single +request from overloading a cluster (e.g., a default request will hit all +indices in a cluster which could cause shard request rejections if the number +of shards per node is high). This default value is `5`.In certain scenarios +parallelism isn't achieved through concurrent request such that this protection +will result in poor performance. For instance in an environment where only a +very low number of concurrent search requests are expected it might help to +increase this value to a higher number. [float] [[msearch-security]] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 647ed9ed3ac77..73780a1835893 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -32,8 +32,8 @@ }, "max_concurrent_shard_requests" : { "type" : "number", - "description" : "The number of concurrent shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", - "default" : "The default grows with the number of nodes in the cluster but is at most 256." + "description" : "The number of concurrent shard requests each sub search executes concurrently per node. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", + "default" : 5 }, "rest_total_hits_as_int" : { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 08d753ee9d558..75444eb66767b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -181,7 +181,7 @@ "max_concurrent_shard_requests" : { "type" : "number", "description" : "The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", - "default" : "The default is 5." 
+ "default" : 5 }, "pre_filter_shard_size" : { "type" : "number", From 733e589bc91355192e867b475e64b073c213fb0c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 23 May 2019 08:05:40 -0700 Subject: [PATCH 224/321] Remove leftover code from one shard by default (#42374) We had some logic to determine the number of shards, it was based on the index version created. Now that master would only ever see index versions created >= 7.0.0, this logic is no longer needed. This commit removes this dead code. --- .../metadata/MetaDataCreateIndexService.java | 19 +------------------ .../MetaDataCreateIndexServiceTests.java | 8 -------- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 1c9794191bf6e..79af84748ad2d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -386,8 +385,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { - final int numberOfShards = getNumberOfShards(indexSettingsBuilder); - indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, numberOfShards)); + indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 1)); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -589,21 +587,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } - static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { - // TODO: this logic can be removed when the current major version is 8 - // TODO: https://github.com/elastic/elasticsearch/issues/38556 - // assert Version.CURRENT.major == 7; - final int numberOfShards; - final Version indexVersionCreated = - Version.fromId(Integer.parseInt(indexSettingsBuilder.get(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey()))); - if (indexVersionCreated.before(Version.V_7_0_0)) { - numberOfShards = 5; - } else { - numberOfShards = 1; - } - return numberOfShards; - } - @Override public void onFailure(String source, Exception e) { if (e instanceof ResourceAlreadyExistsException) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index f83d0aa783c24..3de716acfee44 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -99,14 +99,6 @@ public static boolean isSplitable(int source, int target) { return source * x == target; } - public void testNumberOfShards() { - { - final Version versionCreated = VersionUtils.randomVersionBetween(random(), 
Version.V_7_0_0, Version.CURRENT); - final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); - assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(1)); - } - } - public void testValidateShrinkIndex() { int numShards = randomIntBetween(2, 42); ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10), From 1b0c728cfa2b6fe536a39430c4c4292f30ec1f3f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 23 May 2019 08:06:07 -0700 Subject: [PATCH 225/321] Remove deprecated search.remote settings (#42381) We deprecated these settings awhile ago, in favor of cluster.remote. In 7.x we were gentle and provided automatic upgrade of these settings to the new settings. Now it is time for them to go. This commit removes the deprecated search.remote settings. --- docs/reference/migration/migrate_8_0.asciidoc | 2 + .../migration/migrate_8_0/settings.asciidoc | 13 +++ .../FullClusterRestartSettingsUpgradeIT.java | 24 ----- .../common/settings/ClusterSettings.java | 12 +-- .../transport/RemoteClusterAware.java | 101 +----------------- .../transport/RemoteClusterService.java | 62 +---------- .../common/settings/UpgradeSettingsIT.java | 34 ------ .../transport/RemoteClusterServiceTests.java | 34 +----- .../transport/RemoteClusterSettingsTests.java | 75 ------------- .../FullClusterRestartSettingsUpgradeIT.java | 24 ----- 10 files changed, 26 insertions(+), 355 deletions(-) create mode 100644 docs/reference/migration/migrate_8_0/settings.asciidoc delete mode 100644 qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java delete mode 100644 x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index 84672da61635c..b697abf9a9f25 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -24,6 +24,7 @@ coming[8.0.0] * <> * <> * <> +* <> //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide @@ -59,3 +60,4 @@ include::migrate_8_0/node.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] +include::migrate_8_0/settings.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/settings.asciidoc b/docs/reference/migration/migrate_8_0/settings.asciidoc new file mode 100644 index 0000000000000..0c21ae4021aa7 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/settings.asciidoc @@ -0,0 +1,13 @@ +[float] +[[breaking_80_settings_changes]] +=== Settings changes + +[float] +[[search-remote-settings-removed]] +==== The `search.remote` settings have been removed + +In 6.5 these settings were deprecated in favor of `cluster.remote`. In 7.x we +provided automatic upgrading of these settings to their `cluster.remote` +counterparts. In 8.0.0, these settings have been removed. Elasticsearch will +refuse to start if you have these settings in your configuration or cluster +state. 
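To make the breaking change above concrete, here is a minimal, self-contained sketch of the rename. It is illustrative only: the `cluster_one` alias and the seed address are invented for the example; only `Settings` and `RemoteClusterAware.REMOTE_CLUSTERS_SEEDS` come from the code touched by this patch.

import java.util.List;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.RemoteClusterAware;

public class RemoteSettingsMigrationSketch {
    public static void main(String[] args) {
        // Pre-8.0 spelling: deprecated since 6.5, auto-upgraded in 7.x, and
        // after this commit no longer recognized (a node carrying these keys
        // in its configuration or cluster state refuses to start).
        Settings legacy = Settings.builder()
            .putList("search.remote.cluster_one.seeds", "127.0.0.1:9300")
            .put("search.remote.cluster_one.skip_unavailable", true)
            .build();

        // 8.0 spelling: the same keys under the cluster.remote namespace.
        Settings current = Settings.builder()
            .putList("cluster.remote.cluster_one.seeds", "127.0.0.1:9300")
            .put("cluster.remote.cluster_one.skip_unavailable", true)
            .build();

        // With the fallback gone, only cluster.remote.* is consulted.
        List<String> seeds = RemoteClusterAware.REMOTE_CLUSTERS_SEEDS
            .getConcreteSettingForNamespace("cluster_one")
            .get(current);
        System.out.println(seeds); // prints [127.0.0.1:9300]
    }
}

The namespace lookup at the end mirrors how the RemoteClusterSettingsTests changes further down read the `cluster.remote` settings via getConcreteSettingForNamespace.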
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java deleted file mode 100644 index 9e1e5f93fcd92..0000000000000 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.upgrades; - -public class FullClusterRestartSettingsUpgradeIT extends AbstractFullClusterRestartTestCase { - -} diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index e29ceb7372bcf..026dfa4633991 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -280,19 +280,12 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, RemoteClusterAware.REMOTE_CLUSTERS_PROXY, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, - RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER, - RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, - RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, RemoteClusterService.REMOTE_NODE_ATTRIBUTE, - RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE, RemoteClusterService.ENABLE_REMOTE_CLUSTERS, - RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, @@ -451,9 +444,6 @@ public void apply(Settings value, Settings current, Settings previous) { ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING, LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING); - static List> BUILT_IN_SETTING_UPGRADERS = List.of( - RemoteClusterAware.SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER, - RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER); + static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 
0c3874f0a0f35..316fcd275a5a0 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -41,11 +40,8 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.NavigableSet; import java.util.Set; -import java.util.TreeSet; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -55,41 +51,6 @@ */ public abstract class RemoteClusterAware { - static { - // remove search.remote.* settings in 8.0.0 - // TODO https://github.com/elastic/elasticsearch/issues/38556 - // assert Version.CURRENT.major < 8; - } - - public static final Setting.AffixSetting> SEARCH_REMOTE_CLUSTERS_SEEDS = - Setting.affixKeySetting( - "search.remote.", - "seeds", - key -> Setting.listSetting( - key, - Collections.emptyList(), - s -> { - parsePort(s); - return s; - }, - Setting.Property.Deprecated, - Setting.Property.Dynamic, - Setting.Property.NodeScope)); - - public static final SettingUpgrader> SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER = new SettingUpgrader>() { - - @Override - public Setting> getSetting() { - return SEARCH_REMOTE_CLUSTERS_SEEDS; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^search", "cluster"); - } - - }; - /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ @@ -98,10 +59,7 @@ public String getKey(final String key) { "seeds", key -> Setting.listSetting( key, - // the default needs to be emptyList() when fallback is removed - "_na_".equals(key) - ? SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(key) - : SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSetting(key.replaceAll("^cluster", "search")), + Collections.emptyList(), s -> { // validate seed address parsePort(s); @@ -113,35 +71,6 @@ public String getKey(final String key) { public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':'; public static final String LOCAL_CLUSTER_GROUP_KEY = ""; - public static final Setting.AffixSetting SEARCH_REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( - "search.remote.", - "proxy", - key -> Setting.simpleString( - key, - s -> { - if (Strings.hasLength(s)) { - parsePort(s); - } - }, - Setting.Property.Deprecated, - Setting.Property.Dynamic, - Setting.Property.NodeScope), - REMOTE_CLUSTERS_SEEDS); - - public static final SettingUpgrader SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER = new SettingUpgrader() { - - @Override - public Setting getSetting() { - return SEARCH_REMOTE_CLUSTERS_PROXY; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^search", "cluster"); - } - - }; - /** * A proxy address for the remote cluster. */ @@ -150,15 +79,10 @@ public String getKey(final String key) { "proxy", key -> Setting.simpleString( key, - // no default is needed when fallback is removed, use simple string which gives empty - "_na_".equals(key) - ? 
SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(key) - : SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSetting(key.replaceAll("^cluster", "search")), s -> { if (Strings.hasLength(s)) { parsePort(s); } - return s; }, Setting.Property.Dynamic, Setting.Property.NodeScope), @@ -185,22 +109,8 @@ protected static Map>>>> remoteSeeds = buildRemoteClustersDynamicConfig(settings, REMOTE_CLUSTERS_SEEDS); - final Map>>>> searchRemoteSeeds = - buildRemoteClustersDynamicConfig(settings, SEARCH_REMOTE_CLUSTERS_SEEDS); - // sort the intersection for predictable output order - final NavigableSet intersection = - new TreeSet<>(Arrays.asList( - searchRemoteSeeds.keySet().stream().filter(s -> remoteSeeds.keySet().contains(s)).sorted().toArray(String[]::new))); - if (intersection.isEmpty() == false) { - final String message = String.format( - Locale.ROOT, - "found duplicate remote cluster configurations for cluster alias%s [%s]", - intersection.size() == 1 ? "" : "es", - String.join(",", intersection)); - throw new IllegalArgumentException(message); - } - return Stream - .concat(remoteSeeds.entrySet().stream(), searchRemoteSeeds.entrySet().stream()) + return remoteSeeds.entrySet() + .stream() .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } @@ -296,11 +206,6 @@ public void listenForUpdates(ClusterSettings clusterSettings) { RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE); clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::updateRemoteCluster); - clusterSettings.addAffixUpdateConsumer( - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, - RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, - (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), - (namespace, value) -> {}); } static InetSocketAddress parseSeedAddress(String remoteHost) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 6ab73e8a947fc..4f690d12acf1e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; @@ -70,15 +69,6 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl private static final ActionListener noopListener = ActionListener.wrap((x) -> {}, (x) -> {}); - static { - // remove search.remote.* settings in 8.0.0 - // TODO - // assert Version.CURRENT.major < 8; - } - - public static final Setting SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = - Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated); - /** * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. 
@@ -86,44 +76,27 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting( "cluster.remote.connections_per_cluster", - SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER, // the default needs to three when fallback is removed + 3, 1, Setting.Property.NodeScope); - public static final Setting SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = - Setting.positiveTimeSetting( - "search.remote.initial_connect_timeout", - TimeValue.timeValueSeconds(30), - Setting.Property.NodeScope, - Setting.Property.Deprecated); - /** * The initial connect timeout for remote cluster connections */ public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = Setting.positiveTimeSetting( "cluster.remote.initial_connect_timeout", - SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, // the default needs to be thirty seconds when fallback is removed TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); - public static final Setting SEARCH_REMOTE_NODE_ATTRIBUTE = - Setting.simpleString("search.remote.node.attr", Setting.Property.NodeScope, Setting.Property.Deprecated); - /** * The name of a node attribute to select nodes that should be connected to in the remote cluster. * For instance a node can be configured with {@code node.attr.gateway: true} in order to be eligible as a gateway node between - * clusters. In that case {@code search.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. + * clusters. In that case {@code cluster.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. * The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise. */ public static final Setting REMOTE_NODE_ATTRIBUTE = - Setting.simpleString( - "cluster.remote.node.attr", - SEARCH_REMOTE_NODE_ATTRIBUTE, // no default is needed when fallback is removed, use simple string which gives empty - Setting.Property.NodeScope); - - public static final Setting SEARCH_ENABLE_REMOTE_CLUSTERS = - Setting.boolSetting("search.remote.connect", true, Setting.Property.NodeScope, Setting.Property.Deprecated); + Setting.simpleString("cluster.remote.node.attr", Setting.Property.NodeScope); /** * If true connecting to remote clusters is supported on this node. 
If false this node will not establish @@ -133,40 +106,16 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting ENABLE_REMOTE_CLUSTERS = Setting.boolSetting( "cluster.remote.connect", - SEARCH_ENABLE_REMOTE_CLUSTERS, // the default needs to be true when fallback is removed + true, Setting.Property.NodeScope); - public static final Setting.AffixSetting SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE = - Setting.affixKeySetting( - "search.remote.", - "skip_unavailable", - key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope), - REMOTE_CLUSTERS_SEEDS); - - public static final SettingUpgrader SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER = new SettingUpgrader() { - - @Override - public Setting getSetting() { - return SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^search", "cluster"); - } - - }; - public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", "skip_unavailable", key -> boolSetting( key, - // the default needs to be false when fallback is removed - "_na_".equals(key) - ? SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(key) - : SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSetting(key.replaceAll("^cluster", "search")), + false, Setting.Property.Dynamic, Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); @@ -367,7 +316,6 @@ Set getRemoteClusterNames() { public void listenForUpdates(ClusterSettings clusterSettings) { super.listenForUpdates(clusterSettings); clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); - clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); } private synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java index 99161f842b7c2..839b96e641870 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.transport.RemoteClusterService; import org.junit.After; import java.util.Arrays; @@ -123,37 +122,4 @@ private void runUpgradeSettingsOnUpdateTest( assertThat(UpgradeSettingsPlugin.newSetting.get(settingsFunction.apply(response.getState().metaData())), equalTo("new." 
+ value)); } - public void testUpgradeRemoteClusterSettings() { - final boolean skipUnavailable = randomBoolean(); - client() - .admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put("search.remote.foo.skip_unavailable", skipUnavailable) - .putList("search.remote.foo.seeds", Collections.singletonList("localhost:9200")) - .put("search.remote.foo.proxy", "localhost:9200") - .build()) - .get(); - - final ClusterStateResponse response = client().admin().cluster().prepareState().clear().setMetaData(true).get(); - - final Settings settings = response.getState().metaData().persistentSettings(); - assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings), - equalTo(skipUnavailable)); - assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), - equalTo(Collections.singletonList("localhost:9200"))); - assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); - assertTrue(RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); - assertThat( - RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").get(settings), equalTo("localhost:9200")); - } - } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index d2c476571c927..1105fe137e322 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -57,12 +57,10 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; public class RemoteClusterServiceTests extends ESTestCase { @@ -128,8 +126,8 @@ public void testBuildRemoteClustersDynamicConfig() throws Exception { .put("cluster.remote.bar.seeds", "[::1]:9090") .put("cluster.remote.boom.seeds", "boom-node1.internal:1000") .put("cluster.remote.boom.proxy", "foo.bar.com:1234") - .put("search.remote.quux.seeds", "quux:9300") - .put("search.remote.quux.proxy", "quux-proxy:19300") + .put("cluster.remote.quux.seeds", "quux:9300") + .put("cluster.remote.quux.proxy", "quux-proxy:19300") .build()); assertThat(map.keySet(), containsInAnyOrder(equalTo("foo"), equalTo("bar"), equalTo("boom"), equalTo("quux"))); assertThat(map.get("foo").v2(), hasSize(1)); @@ -162,34 +160,6 @@ public void testBuildRemoteClustersDynamicConfig() throws Exception { assertEquals(quux.getId(), "quux#quux:9300"); assertEquals("quux-proxy:19300", map.get("quux").v1()); 
assertEquals(quux.getVersion(), Version.CURRENT.minimumCompatibilityVersion()); - - assertSettingDeprecationsAndWarnings(new String[]{"search.remote.quux.seeds", "search.remote.quux.proxy"}); - } - - public void testBuildRemoteClustersDynamicConfigWithDuplicate() { - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> RemoteClusterService.buildRemoteClustersDynamicConfig( - Settings.builder() - .put("cluster.remote.foo.seeds", "192.168.0.1:8080") - .put("search.remote.foo.seeds", "192.168.0.1:8080") - .build())); - assertThat(e, hasToString(containsString("found duplicate remote cluster configurations for cluster alias [foo]"))); - assertSettingDeprecationsAndWarnings(new String[]{"search.remote.foo.seeds"}); - } - - public void testBuildRemoteClustersDynamicConfigWithDuplicates() { - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> RemoteClusterService.buildRemoteClustersDynamicConfig( - Settings.builder() - .put("cluster.remote.foo.seeds", "192.168.0.1:8080") - .put("search.remote.foo.seeds", "192.168.0.1:8080") - .put("cluster.remote.bar.seeds", "192.168.0.1:8080") - .put("search.remote.bar.seeds", "192.168.0.1:8080") - .build())); - assertThat(e, hasToString(containsString("found duplicate remote cluster configurations for cluster aliases [bar,foo]"))); - assertSettingDeprecationsAndWarnings(new String[]{"search.remote.bar.seeds", "search.remote.foo.seeds"}); } public void testGroupClusterIndices() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java index cfffc3839461e..41df47363b0b6 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java @@ -19,125 +19,50 @@ package org.elasticsearch.transport; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.List; import java.util.concurrent.TimeUnit; import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_PROXY; import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTERS_SEEDS; -import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY; -import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS; import static org.elasticsearch.transport.RemoteClusterService.ENABLE_REMOTE_CLUSTERS; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_NODE_ATTRIBUTE; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER; -import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING; -import static 
org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE; import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; public class RemoteClusterSettingsTests extends ESTestCase { - public void testConnectionsPerClusterFallback() { - final int value = randomIntBetween(1, 8); - final Settings settings = Settings.builder().put(SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER.getKey(), value).build(); - assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(settings), equalTo(value)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER}); - } - public void testConnectionsPerClusterDefault() { assertThat(REMOTE_CONNECTIONS_PER_CLUSTER.get(Settings.EMPTY), equalTo(3)); } - public void testInitialConnectTimeoutFallback() { - final String value = randomTimeValue(30, 300, "s"); - final Settings settings = Settings.builder().put(SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey(), value).build(); - assertThat( - REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), - equalTo(TimeValue.parseTimeValue(value, SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.getKey()))); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING}); - } - public void testInitialConnectTimeoutDefault() { assertThat(REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(Settings.EMPTY), equalTo(new TimeValue(30, TimeUnit.SECONDS))); } - public void testRemoteNodeAttributeFallback() { - final String attribute = randomAlphaOfLength(8); - final Settings settings = Settings.builder().put(SEARCH_REMOTE_NODE_ATTRIBUTE.getKey(), attribute).build(); - assertThat(REMOTE_NODE_ATTRIBUTE.get(settings), equalTo(attribute)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_NODE_ATTRIBUTE}); - } - public void testRemoteNodeAttributeDefault() { assertThat(REMOTE_NODE_ATTRIBUTE.get(Settings.EMPTY), equalTo("")); } - public void testEnableRemoteClustersFallback() { - final boolean enable = randomBoolean(); - final Settings settings = Settings.builder().put(SEARCH_ENABLE_REMOTE_CLUSTERS.getKey(), enable).build(); - assertThat(ENABLE_REMOTE_CLUSTERS.get(settings), equalTo(enable)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_ENABLE_REMOTE_CLUSTERS}); - } - public void testEnableRemoteClustersDefault() { assertTrue(ENABLE_REMOTE_CLUSTERS.get(Settings.EMPTY)); } - public void testSkipUnavailableFallback() { - final String alias = randomAlphaOfLength(8); - final boolean skip = randomBoolean(); - final Settings settings = - Settings.builder().put(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).getKey(), skip).build(); - assertThat(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(settings), equalTo(skip)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias)}); - } - public void testSkipUnavailableDefault() { final String alias = randomAlphaOfLength(8); assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); } - public void testSeedsFallback() { - final String alias = randomAlphaOfLength(8); - final int numberOfSeeds = randomIntBetween(1, 8); - final List seeds = new ArrayList<>(numberOfSeeds); - for (int i = 0; i < numberOfSeeds; i++) { - seeds.add("localhost:" + Integer.toString(9200 + i)); - } - final Settings settings = - Settings.builder() - 
.put(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).getKey(), String.join(",", seeds)).build(); - assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(settings), equalTo(seeds)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias)}); - } - public void testSeedsDefault() { final String alias = randomAlphaOfLength(8); assertThat(REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), emptyCollectionOf(String.class)); } - public void testProxyFallback() { - final String alias = randomAlphaOfLength(8); - final String proxy = randomAlphaOfLength(8); - final int port = randomIntBetween(9200, 9300); - final String value = proxy + ":" + port; - final Settings settings = - Settings.builder() - .put(SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).getKey(), value).build(); - assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(settings), equalTo(value)); - assertSettingDeprecationsAndWarnings(new Setting[]{SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias)}); - } - public void testProxyDefault() { final String alias = randomAlphaOfLength(8); assertThat(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(alias).get(Settings.EMPTY), equalTo("")); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java deleted file mode 100644 index a679604a546fc..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.restart; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; - -public class FullClusterRestartSettingsUpgradeIT extends org.elasticsearch.upgrades.FullClusterRestartSettingsUpgradeIT { - - @Override - protected Settings restClientSettings() { - final String token = - "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - -} From 39a3d637340ab8de0220547671e08cc0b37fa326 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 23 May 2019 17:08:29 +0200 Subject: [PATCH 226/321] Unguice Snapshot / Restore services (#42357) This removes the @Inject annotations from the Snapshot/Restore infrastructure classes and registers them manually in Node.java --- .../status/TransportNodesSnapshotsStatus.java | 2 -- .../java/org/elasticsearch/node/Node.java | 21 +++++++++++++++++-- .../repositories/RepositoriesModule.java | 20 ++++++------------ .../snapshots/RestoreService.java | 2 -- .../snapshots/SnapshotShardsService.java | 2 -- .../snapshots/SnapshotsService.java | 2 -- 6 files changed, 25 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 8f71090cc469f..1f55c1e00cef6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -60,7 +59,6 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction repoPlugins, T } } + Settings settings = env.settings(); Map repositoryTypes = Collections.unmodifiableMap(factories); Map internalRepositoryTypes = Collections.unmodifiableMap(internalFactories); - repositoriesService = new RepositoriesService(env.settings(), clusterService, transportService, repositoryTypes, + repositoriesService = new RepositoriesService(settings, clusterService, transportService, repositoryTypes, internalRepositoryTypes, threadPool); } - @Override - protected void configure() { - bind(RepositoriesService.class).toInstance(repositoriesService); - bind(SnapshotsService.class).asEagerSingleton(); - bind(SnapshotShardsService.class).asEagerSingleton(); - bind(TransportNodesSnapshotsStatus.class).asEagerSingleton(); - bind(RestoreService.class).asEagerSingleton(); + public RepositoriesService getRepositoryService() { + return repositoriesService; } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index cb9e7fee04249..f48ea7e41d555 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ 
b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -58,7 +58,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; @@ -152,7 +151,6 @@ public class RestoreService implements ClusterStateApplier { private final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor; - @Inject public RestoreService(ClusterService clusterService, RepositoriesService repositoriesService, AllocationService allocationService, MetaDataCreateIndexService createIndexService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index b21df093fadd2..65e1191211ec2 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -49,7 +49,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -115,7 +114,6 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; - @Inject public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index e606bff0cb9e4..a6138b8f6052b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -57,7 +57,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -131,7 +130,6 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus // Set of snapshots that are currently being ended by this node private final Set endingSnapshots = Collections.synchronizedSet(new HashSet<>()); - @Inject public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) { this.clusterService = clusterService; From 9cadfd2b218c79eef20a643385988780b63c10e8 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 23 May 
2019 09:16:34 -0700 Subject: [PATCH 227/321] Mute slow and flaky build-tools integration tests --- .../java/org/elasticsearch/gradle/BuildExamplePluginsIT.java | 2 ++ .../elasticsearch/gradle/testclusters/TestClustersPluginIT.java | 1 + 2 files changed, 3 insertions(+) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index bf982fa3aa2d2..7b4b315fd0028 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -38,6 +39,7 @@ import java.util.Objects; import java.util.stream.Collectors; +@Ignore("https://github.com/elastic/elasticsearch/issues/42453") public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static final List EXAMPLE_PLUGINS = Collections.unmodifiableList( diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 39651ff896057..a59f54e132073 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -27,6 +27,7 @@ import java.util.Arrays; +@Ignore("https://github.com/elastic/elasticsearch/issues/42453") public class TestClustersPluginIT extends GradleIntegrationTestCase { private GradleRunner runner; From 274b634936d1763daf3e6d1bfe1f0dacb5c4cb34 Mon Sep 17 00:00:00 2001 From: emasab Date: Thu, 23 May 2019 18:35:39 +0200 Subject: [PATCH 228/321] Build local year inside DateFormat lambda bugfix for https://github.com/elastic/elasticsearch/issues/41797 (#42120) This makes sure that the year can change between when the lambda is generated and when it is executed without causing the incorrect year to be used. 
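To make the failure mode concrete, here is a small self-contained sketch of capturing a value at lambda creation versus reading it at execution. The names are illustrative only; this is not the actual ingest DateFormat code, which uses this pattern to fill in a missing year while parsing dates.

import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.function.Supplier;

public class YearCaptureSketch {
    public static void main(String[] args) {
        // Buggy shape: the year is computed once, when the lambda is built.
        // A pipeline created on Dec 31 keeps stamping next year's dates with
        // the old year for as long as the lambda lives.
        int yearAtCreation = LocalDate.now(ZoneOffset.UTC).getYear();
        Supplier<ZonedDateTime> stale =
            () -> Instant.EPOCH.atZone(ZoneOffset.UTC).withYear(yearAtCreation);

        // Fixed shape: read the clock inside the lambda, so the fallback year
        // is always the year at execution time.
        Supplier<ZonedDateTime> fresh =
            () -> Instant.EPOCH.atZone(ZoneOffset.UTC)
                .withYear(LocalDate.now(ZoneOffset.UTC).getYear());

        System.out.println(stale.get().getYear() + " vs " + fresh.get().getYear());
    }
}

The same capture-at-creation versus read-at-execution distinction is what moving the `int year = ...` assignment inside the lambda achieves in the diff below.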
Resolves #41797 --- .../main/java/org/elasticsearch/ingest/common/DateFormat.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 65efdb40a5cc1..be5d7e47f1c02 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -89,7 +89,6 @@ Function getFunction(String format, ZoneId zoneId, Locale boolean isUtc = ZoneOffset.UTC.equals(zoneId); - int year = LocalDate.now(ZoneOffset.UTC).getYear(); DateFormatter dateFormatter = DateFormatter.forPattern(format) .withLocale(locale); // if UTC zone is set here, the time zone specified in the format will be ignored, leading to wrong dates @@ -102,6 +101,7 @@ Function getFunction(String format, ZoneId zoneId, Locale // if there is no year, we fall back to the current one and // fill the rest of the date up with the parsed date if (accessor.isSupported(ChronoField.YEAR) == false) { + int year = LocalDate.now(ZoneOffset.UTC).getYear(); ZonedDateTime newTime = Instant.EPOCH.atZone(ZoneOffset.UTC).withYear(year); for (ChronoField field : FIELDS) { if (accessor.isSupported(field)) { From 4520e88b2257ec7fc8b8cdeca29cb68d28df0b4b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 23 May 2019 10:16:48 -0700 Subject: [PATCH 229/321] Remove transport client from xpack (#42202) This commit removes support for the transport client from xpack. --- build.gradle | 3 +- .../gradle/test/RestIntegTestTask.groovy | 11 +- .../build.gradle | 2 +- .../example/CustomAuthorizationEngineIT.java | 70 +++--- .../java/org/elasticsearch/node/Node.java | 2 + .../java/org/elasticsearch/node/MockNode.java | 5 + .../elasticsearch/test/ESIntegTestCase.java | 16 +- .../test/ExternalTestCluster.java | 57 +++-- .../ccs-clients-integrations/java.asciidoc | 203 ----------------- x-pack/docs/en/watcher/java.asciidoc | 130 ----------- .../java/org/elasticsearch/xpack/ccr/Ccr.java | 6 - .../xpack/core/XPackClientPlugin.java | 53 +---- .../elasticsearch/xpack/core/XPackPlugin.java | 16 +- .../AbstractLicensesIntegrationTestCase.java | 1 + .../license/StartBasicLicenseTests.java | 2 +- .../license/StartTrialLicenseTests.java | 2 +- .../snapshots/SourceOnlySnapshotIT.java | 2 +- .../integration/DataFrameIntegTestCase.java | 210 +++++++++--------- .../integration/DataFrameTransformIT.java | 24 +- .../DataFrameTransformProgressIT.java | 83 +++---- .../xpack/dataframe/DataFrame.java | 14 +- .../transforms/TransformProgressGatherer.java | 27 ++- .../xpack/indexlifecycle/IndexLifecycle.java | 10 +- .../IndexLifecycleInitialisationTests.java | 20 +- .../xpack/logstash/Logstash.java | 6 - .../ml/integration/MlNativeIntegTestCase.java | 46 +++- .../xpack/ml/MachineLearning.java | 18 +- .../xpack/ml/MachineLearningFeatureSet.java | 3 +- .../xpack/monitoring/Monitoring.java | 8 +- .../MonitoringPluginClientTests.java | 13 -- .../elasticsearch/xpack/rollup/Rollup.java | 10 +- .../xpack/security/Security.java | 52 ++--- .../support/AbstractSecurityModule.java | 47 ---- .../integration/BulkUpdateTests.java | 10 +- .../integration/ClearRolesCacheTests.java | 2 +- .../integration/FieldLevelSecurityTests.java | 2 +- .../MultipleIndicesPermissionsTests.java | 4 +- .../PermissionPrecedenceTests.java | 16 +- .../integration/SecurityClearScrollTests.java | 4 +- 
.../ShrinkIndexWithSecurityTests.java | 2 +- .../test/NativeRealmIntegTestCase.java | 2 +- .../test/SecurityIntegTestCase.java | 53 +---- .../xpack/security/SecurityTests.java | 12 +- .../xpack/security/TemplateUpgraderTests.java | 2 +- .../AuditTrailSettingsUpdateTests.java | 2 +- .../security/authz/SecurityScrollTests.java | 11 - .../filter/IpFilteringIntegrationTests.java | 6 - .../filter/IpFilteringUpdateTests.java | 2 +- .../netty4/IPHostnameVerificationTests.java | 88 -------- .../xpack/ssl/SSLClientAuthTests.java | 2 +- .../xpack/ssl/SSLTrustRestrictionsTests.java | 11 +- .../elasticsearch/xpack/watcher/Watcher.java | 10 +- .../xpack/security/ReindexWithSecurityIT.java | 188 +++++++++++----- x-pack/qa/security-client-tests/build.gradle | 40 ---- .../qa/SecurityTransportClientIT.java | 125 ----------- .../build.gradle | 2 +- .../example/realm/CustomRealmIT.java | 108 ++------- .../example/role/CustomRolesProviderIT.java | 69 +++--- .../smoketest/PreventFailingBuildIT.java | 4 +- x-pack/qa/smoke-test-plugins-ssl/build.gradle | 1 + .../SmokeTestMonitoringWithSecurityIT.java | 171 +++++++++----- x-pack/qa/transport-client-tests/build.gradle | 22 -- .../ml/client/ESXPackSmokeClientTestCase.java | 153 ------------- .../xpack/ml/client/MLTransportClientIT.java | 179 --------------- x-pack/transport-client/build.gradle | 41 ---- .../client/PreBuiltXPackTransportClient.java | 66 ------ .../PreBuiltXPackTransportClientTests.java | 30 --- 67 files changed, 699 insertions(+), 1913 deletions(-) delete mode 100644 x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc delete mode 100644 x-pack/docs/en/watcher/java.asciidoc delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java delete mode 100644 x-pack/qa/security-client-tests/build.gradle delete mode 100644 x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java delete mode 100644 x-pack/qa/transport-client-tests/build.gradle delete mode 100644 x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java delete mode 100644 x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java delete mode 100644 x-pack/transport-client/build.gradle delete mode 100644 x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java delete mode 100644 x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java diff --git a/build.gradle b/build.gradle index 8794a1f930523..7de02b814da86 100644 --- a/build.gradle +++ b/build.gradle @@ -241,8 +241,7 @@ allprojects { "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', // for security example plugins - "org.elasticsearch.plugin:x-pack-core:${version}": ':x-pack:plugin:core', - "org.elasticsearch.client:x-pack-transport:${version}": ':x-pack:transport-client' + "org.elasticsearch.plugin:x-pack-core:${version}": ':x-pack:plugin:core' ] /* diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index ef784b6f901d1..52c498aa98d79 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -114,13 +114,14 @@ class RestIntegTestTask extends DefaultTask { runner.ext.nonInputProperties = nonInputProperties if (System.getProperty("tests.rest.cluster") == null) { - if (System.getProperty("tests.cluster") != null) { - throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") + if (System.getProperty("tests.cluster") != null || System.getProperty("tests.clustername") != null) { + throw new IllegalArgumentException("tests.rest.cluster, tests.cluster, and tests.clustername must all be null or non-null") } if (usesTestclusters == true) { ElasticsearchCluster cluster = project.testClusters."${name}" nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") + nonInputProperties.systemProperty('tests.clustername', "${-> cluster.getName() }") } else { // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node @@ -130,6 +131,7 @@ class RestIntegTestTask extends DefaultTask { // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + nonInputProperties.systemProperty('tests.clustername', "${-> nodes[0].clusterName}") // dump errors and warnings from cluster log on failure TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { @@ -150,12 +152,13 @@ class RestIntegTestTask extends DefaultTask { } } } else { - if (System.getProperty("tests.cluster") == null) { - throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") + if (System.getProperty("tests.cluster") == null || System.getProperty("tests.clustername") == null) { + throw new IllegalArgumentException("tests.rest.cluster, tests.cluster, and tests.clustername must all be null or non-null") } // an external cluster was specified and all responsibility for cluster configuration is taken by the user runner.systemProperty('tests.rest.cluster', System.getProperty("tests.rest.cluster")) runner.systemProperty('test.cluster', System.getProperty("tests.cluster")) + runner.systemProperty('test.clustername', System.getProperty("tests.clustername")) } // copy the rest spec/tests into the test resources diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index fba9580525bcc..787cc230eeb18 100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -12,7 +12,7 @@ esplugin { dependencies { compileOnly "org.elasticsearch.plugin:x-pack-core:${versions.elasticsearch}" - testCompile "org.elasticsearch.client:x-pack-transport:${versions.elasticsearch}" + testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${versions.elasticsearch}" } integTest { diff --git a/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java b/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java index 
9daf9bd01a8bc..4342b2a4b88f0 100644 --- a/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java +++ b/plugins/examples/security-authorization-engine/src/test/java/org/elasticsearch/example/CustomAuthorizationEngineIT.java @@ -24,22 +24,21 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import org.elasticsearch.xpack.core.security.client.SecurityClient; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Base64; -import java.util.Collection; import java.util.Collections; +import java.util.List; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; @@ -50,26 +49,21 @@ * an external cluster with the custom authorization plugin installed to validate the functionality * when running as a plugin */ -public class CustomAuthorizationEngineIT extends ESIntegTestCase { +public class CustomAuthorizationEngineIT extends ESRestTestCase { @Override - protected Settings externalClusterClientSettings() { + protected Settings restClientSettings() { final String token = "Basic " + Base64.getEncoder().encodeToString(("test_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); return Settings.builder() .put(ThreadContext.PREFIX + ".Authorization", token) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") .build(); } - @Override - protected Collection<Class<? extends Plugin>> transportClientPlugins() { - return Collections.singleton(XPackClientPlugin.class); - } - public void testClusterAction() throws IOException { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("custom_user", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); @@ -77,25 +71,27 @@ public void testClusterAction() throws IOException { basicAuthHeaderValue("custom_user", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("GET", "_cluster/health"); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); } { - securityClient.preparePutUser("custom_user2",
"x-pack-test-password".toCharArray(), Hasher.BCRYPT, "not_superuser").get(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user2", List.of("not_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("custom_user2", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("GET", "_cluster/health"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } public void testIndexAction() throws IOException { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("custom_user", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); @@ -103,27 +99,31 @@ public void testIndexAction() throws IOException { basicAuthHeaderValue("custom_user", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("PUT", "/index"); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); } { - securityClient.preparePutUser("custom_user2", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "not_superuser").get(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user2", List.of("not_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("custom_user2", new SecureString("x-pack-test-password".toCharArray()))); Request request = new Request("PUT", "/index"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } public void testRunAs() throws IOException { - SecurityClient securityClient = new SecurityClient(client()); - securityClient.preparePutUser("custom_user", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); - securityClient.preparePutUser("custom_user2", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "custom_superuser").get(); - securityClient.preparePutUser("custom_user3", "x-pack-test-password".toCharArray(), Hasher.BCRYPT, "not_superuser").get(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, 
RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user2", List.of("custom_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); + restClient.security().putUser(PutUserRequest.withPassword(new User("custom_user3", List.of("not_superuser")), + "x-pack-test-password".toCharArray(), true, RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); @@ -132,7 +132,7 @@ public void testRunAs() throws IOException { options.addHeader("es-security-runas-user", "custom_user2"); Request request = new Request("GET", "/_security/_authenticate"); request.setOptions(options); - Response response = getRestClient().performRequest(request); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); String responseStr = EntityUtils.toString(response.getEntity()); assertThat(responseStr, containsString("custom_user2")); @@ -145,7 +145,7 @@ public void testRunAs() throws IOException { options.addHeader("es-security-runas-user", "custom_user3"); Request request = new Request("PUT", "/index"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } @@ -156,8 +156,14 @@ public void testRunAs() throws IOException { options.addHeader("es-security-runas-user", "custom_user2"); Request request = new Request("PUT", "/index"); request.setOptions(options); - ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); } } + + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index d338a4663a32a..fab08ab1c03f7 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -251,6 +251,7 @@ public class Node implements Closeable { private final Collection<LifecycleComponent> pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; + final NamedWriteableRegistry namedWriteableRegistry; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -589,6 +590,7 @@ protected Node( this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); client.initialize(injector.getInstance(new Key>() {}), () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); + this.namedWriteableRegistry = namedWriteableRegistry; logger.debug("initializing HTTP handlers ..."); actionModule.initRestHandlers(() -> clusterService.state().nodes()); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 31b8ba01dc4a8..b43e438bc3210 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
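The `TestRestHighLevelClient` inner class above is the pattern this migration uses throughout: wrap the suite's shared low-level `RestClient` in a `RestHighLevelClient` through its protected constructor, with a no-op close callback so the wrapper never closes the framework-owned client. A minimal sketch of the same idea outside a test class (the class name is hypothetical):

[source,java]
--------------------------------------------------
import java.util.Collections;

import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

// Wraps an externally owned low-level client; the no-op doClose callback
// means closing this wrapper does not close the shared RestClient.
class SharedHighLevelClient extends RestHighLevelClient {
    SharedHighLevelClient(RestClient lowLevelClient) {
        super(lowLevelClient, restClient -> {}, Collections.emptyList());
    }
}
--------------------------------------------------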
+++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -179,4 +180,8 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) { protected void configureNodeAndClusterIdStateListener(ClusterService clusterService) { //do not configure this in tests as this is causing SetOnce to throw exceptions when jvm is used for multiple tests } + + public NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index d45c83444b2fc..94a8e9b7728ce 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -289,6 +289,11 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public static final String TESTS_CLUSTER = "tests.cluster"; + /** + * Key used to eventually switch to using an external cluster and provide the cluster name + */ + public static final String TESTS_CLUSTER_NAME = "tests.clustername"; + /** * Key used to retrieve the index random seed from the index settings on a running node. * The value of this seed can be used to initialize a random context for a specific index. @@ -1829,7 +1834,7 @@ protected Settings transportClientSettings() { return Settings.EMPTY; } - private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException { + private ExternalTestCluster buildExternalCluster(String clusterAddresses, String clusterName) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; @@ -1838,7 +1843,8 @@ private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws InetAddress inetAddress = InetAddress.getByName(url.getHost()); transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } - return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); + return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), nodePlugins(), getClientWrapper(), clusterName, + transportAddresses); } protected Settings externalClusterClientSettings() { @@ -1855,7 +1861,11 @@ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOExceptio if (scope == Scope.TEST) { throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER); } - return buildExternalCluster(clusterAddresses); + String clusterName = System.getProperty(TESTS_CLUSTER_NAME); + if (Strings.isNullOrEmpty(clusterName)) { + throw new IllegalArgumentException("External test cluster name must be provided"); + } + return buildExternalCluster(clusterAddresses, clusterName); } final String nodePrefix; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java 
b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index 74edfd3a46514..e77d143e50d99 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -32,18 +33,23 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.nio.MockNioTransportPlugin; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; import static org.elasticsearch.test.ESTestCase.getTestTransportType; import static org.hamcrest.Matchers.equalTo; @@ -61,7 +67,8 @@ public final class ExternalTestCluster extends TestCluster { private static final AtomicInteger counter = new AtomicInteger(); public static final String EXTERNAL_CLUSTER_PREFIX = "external_"; - private final MockTransportClient client; + private final MockNode node; + private final Client client; private final InetSocketAddress[] httpAddresses; @@ -71,13 +78,21 @@ public final class ExternalTestCluster extends TestCluster { private final int numMasterAndDataNodes; public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection<Class<? extends Plugin>> pluginClasses, - TransportAddress... transportAddresses) { + Function<Client, Client> clientWrapper, String clusterName, TransportAddress...
transportAddresses) { super(0); + this.clusterName = clusterName; Settings.Builder clientSettingsBuilder = Settings.builder() .put(additionalSettings) - .put("node.name", InternalTestCluster.TRANSPORT_CLIENT_PREFIX + EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement()) - .put("client.transport.ignore_cluster_name", true) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir); + .put("node.master", false) + .put("node.data", false) + .put("node.ingest", false) + .put("node.name", EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement()) + .put("cluster.name", clusterName) + .putList("discovery.seed_hosts", + Arrays.stream(transportAddresses).map(TransportAddress::toString).collect(Collectors.toList())); + if (Environment.PATH_HOME_SETTING.exists(additionalSettings) == false) { + clientSettingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), tempDir); + } boolean addMockTcpTransport = additionalSettings.get(NetworkModule.TRANSPORT_TYPE_KEY) == null; if (addMockTcpTransport) { @@ -88,13 +103,15 @@ public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection pluginClasses.add(MockNioTransportPlugin.class); } } + pluginClasses = new ArrayList<>(pluginClasses); + pluginClasses.add(MockHttpTransport.TestPlugin.class); Settings clientSettings = clientSettingsBuilder.build(); - MockTransportClient client = new MockTransportClient(clientSettings, pluginClasses); + MockNode node = new MockNode(clientSettings, pluginClasses); + Client client = clientWrapper.apply(node.client()); try { - client.addTransportAddresses(transportAddresses); + node.start(); NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get(); httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()]; - this.clusterName = nodeInfos.getClusterName().value(); int dataNodes = 0; int masterAndDataNodes = 0; for (int i = 0; i < nodeInfos.getNodes().size(); i++) { @@ -110,10 +127,22 @@ public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection this.numDataNodes = dataNodes; this.numMasterAndDataNodes = masterAndDataNodes; this.client = client; + this.node = node; logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size()); + } catch (NodeValidationException e) { + try { + IOUtils.close(client, node); + } catch (IOException e1) { + e.addSuppressed(e1); + } + throw new ElasticsearchException(e); } catch (Exception e) { - client.close(); + try { + IOUtils.close(client, node); + } catch (IOException e1) { + e.addSuppressed(e1); + } throw e; } } @@ -150,7 +179,7 @@ public InetSocketAddress[] httpAddresses() { @Override public void close() throws IOException { - client.close(); + IOUtils.close(client, node); } @Override @@ -181,12 +210,12 @@ public void ensureEstimatedStats() { @Override public Iterable<Client> getClients() { - return Collections.singleton(client); + return List.of(client); } @Override public NamedWriteableRegistry getNamedWriteableRegistry() { - return client.getNamedWriteableRegistry(); + return node.getNamedWriteableRegistry(); } @Override diff --git a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc deleted file mode 100644 index a19532bdb67c5..0000000000000 --- a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc +++ /dev/null @@ -1,203 +0,0 @@ -[[java-clients]] -=== Java Client and security - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the
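The rewritten `ExternalTestCluster` constructor above joins the external cluster as a coordinating-only `MockNode` rather than a `TransportClient`. The settings it assembles reduce to roughly the following shape (a sketch under the assumptions visible in the diff; the class name and addresses are illustrative):

[source,java]
--------------------------------------------------
import java.util.Arrays;

import org.elasticsearch.common.settings.Settings;

// Sketch: settings for a coordinating-only node (no master/data/ingest
// roles) that joins an existing cluster via discovery.seed_hosts.
class CoordinatingOnlySettingsExample {
    static Settings build(String clusterName, String... seedHosts) {
        return Settings.builder()
            .put("node.master", false)
            .put("node.data", false)
            .put("node.ingest", false)
            .put("cluster.name", clusterName)
            .putList("discovery.seed_hosts", Arrays.asList(seedHosts))
            .build();
    }
}
--------------------------------------------------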
{java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -The {es} {security-features} support the Java http://www.elastic.co/guide/en/elasticsearch/client/java-api/current/transport-client.html[transport client] for Elasticsearch. -The transport client uses the same transport protocol that the cluster nodes use -for inter-node communication. It is very efficient as it does not have to marshall -and unmarshall JSON requests like a typical REST client. - -NOTE: Using the Java Node Client with secured clusters is not recommended or - supported. - -[float] -[[transport-client]] -==== Configuring the Transport Client to work with a Secured Cluster - -To use the transport client with a secured cluster, you need to: - -[[java-transport-client-role]] -. {ref}/setup-xpack-client.html[Configure the {xpack} transport client]. - -. Configure a user with the privileges required to start the transport client. -A default `transport_client` role is built-in to the {es} {security-features}, -which grants the -appropriate cluster permissions for the transport client to work with the secured -cluster. The transport client uses the _Nodes Info API_ to fetch information about -the nodes in the cluster. - -. Set up the transport client. At a minimum, you must configure `xpack.security.user` to -include the name and password of your transport client user in your requests. The -following snippet configures the user credentials globally--every request -submitted with this client includes the `transport_client_user` credentials in -its headers. -+ --- -[source,java] -------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - ... - .build()) - .addTransportAddress(new TransportAddress("localhost", 9300)) - .addTransportAddress(new TransportAddress("localhost", 9301)); -------------------------------------------------------------------------------------------------- - -WARNING: If you configure a transport client without SSL, passwords are sent in - clear text. - -You can also add an `Authorization` header to each request. If you've configured -global authorization credentials, the `Authorization` header overrides the global -authentication credentials. This is useful when an application has multiple users -who access Elasticsearch using the same client. You can set the global token to -a user that only has the `transport_client` role, and add the `transport_client` -role to the individual users. - -For example, the following snippet adds the `Authorization` header to a search -request: - -[source,java] --------------------------------------------------------------------------------------------------- -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; - -import static UsernamePasswordToken.basicAuthHeaderValue; -... 
- -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - ... - .build()) - .build() - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9301)) - -String token = basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray())); - -client.filterWithHeader(Collections.singletonMap("Authorization", token)) - .prepareSearch().get(); -------------------------------------------------------------------------------------------------- --- - -. Enable SSL to authenticate clients and encrypt communications. To enable SSL, -you need to: - -.. Configure the paths to the client's key and certificate in addition to the certificate authorities. -Client authentication requires every client to have a certificate signed by a trusted CA. -+ --- -NOTE: Client authentication is enabled by default. For information about - disabling client authentication, see <<disabling-client-auth>>. - -[source,java] -------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.key", "/path/to/client.key") - .put("xpack.security.transport.ssl.certificate", "/path/to/client.crt") - .put("xpack.security.transport.ssl.certificate_authorities", "/path/to/ca.crt") - ... - .build()); -------------------------------------------------------------------------------------------------- --- - -.. Enable the SSL transport by setting `xpack.security.transport.ssl.enabled` to `true` in the -client configuration. -+ --- -[source,java] -------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "transport_client_user:x-pack-test-password") - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.key", "/path/to/client.key") - .put("xpack.security.transport.ssl.certificate", "/path/to/client.crt") - .put("xpack.security.transport.ssl.certificate_authorities", "/path/to/ca.crt") - .put("xpack.security.transport.ssl.enabled", "true") - ... - .build()) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9301)) -------------------------------------------------------------------------------------------------- --- - -[float] -[[disabling-client-auth]] -===== Disabling client authentication - -If you want to disable client authentication, you can use a client-specific -transport protocol. For more information see <<separating-node-client-traffic>>. - -If you are not using client authentication and sign the Elasticsearch node -certificates with your own CA, you need to provide the path to the CA -certificate in your client configuration.
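For comparison with the transport-client snippets in the documentation being removed above, the high-level REST client counterpart authenticates with basic credentials over HTTPS. A minimal sketch, with placeholder endpoint and credentials:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class SecureRestClientSketch {
    public static RestHighLevelClient build() {
        // Placeholder credentials and endpoint.
        BasicCredentialsProvider credentials = new BasicCredentialsProvider();
        credentials.setCredentials(AuthScope.ANY,
            new UsernamePasswordCredentials("transport_client_user", "x-pack-test-password"));
        // Basic auth is supplied through the HTTP client; TLS comes from the
        // https scheme (trust configuration omitted for brevity).
        return new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "https"))
                .setHttpClientConfigCallback(http -> http.setDefaultCredentialsProvider(credentials)));
    }
}
--------------------------------------------------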
- -[source,java] ------------------------------------------------------------------------------------------------------- -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - .put("xpack.security.user", "test_user:x-pack-test-password") - .put("xpack.security.transport.ssl.certificate_authorities", "/path/to/ca.crt") - .put("xpack.security.transport.ssl.enabled", "true") - ... - .build()) - .addTransportAddress(new TransportAddress("localhost", 9300)) - .addTransportAddress(new TransportAddress("localhost", 9301)); ------------------------------------------------------------------------------------------------------- - -NOTE: If you are using a public CA that is already trusted by the Java runtime, - you do not need to set the `xpack.security.transport.ssl.certificate_authorities`. - -[float] -[[connecting-anonymously]] -===== Connecting anonymously - -To enable the transport client to connect anonymously, you must assign the -anonymous user the privileges defined in the <<java-transport-client-role,transport client>> -role. Anonymous access must also be enabled, of course. For more information, -see <<anonymous-access>>. - -[float] -[[security-client]] -==== Security client - -The {stack} {security-features} expose an API through the `SecurityClient` class. -To get a hold of a `SecurityClient` you first need to create the `XPackClient`, -which is a wrapper around the existing {es} clients (any client class implementing -`org.elasticsearch.client.Client`). - -The following example shows how you can clear the realm caches using -the `SecurityClient`: - -[source,java] ------------------------------------------------------------------------------------------------------- -Client client = ... // create the transport client - -XPackClient xpackClient = new XPackClient(client); -SecurityClient securityClient = xpackClient.security(); -ClearRealmCacheResponse response = securityClient.authc().prepareClearRealmCache() - .realms("ldap1", "ad1") <1> - .usernames("rdeniro") - .get(); ------------------------------------------------------------------------------------------------------- -<1> Clears the `ldap1` and `ad1` realm caches for the `rdeniro` user. diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc deleted file mode 100644 index 7224196834f9b..0000000000000 --- a/x-pack/docs/en/watcher/java.asciidoc +++ /dev/null @@ -1,130 +0,0 @@ -[[api-java]] -== Java API - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -{xpack} provides a Java client called `WatcherClient` that adds native Java -support for the {watcher}. - -To obtain a `WatcherClient` instance, make sure you first set up the -`XPackClient`. - -[float] -=== Installing XPackClient - -You first need to make sure the +x-pack-transport-{version}+ JAR file is in the classpath. -You can extract this jar from the downloaded {xpack} bundle. - -If you use Maven to manage dependencies, add the following to the `pom.xml`: - -["source","xml",subs="attributes,callouts"] -------------------------------------------------- -<project ...> - - <repositories> - <!-- add the elasticsearch repo --> - <repository> - <id>elasticsearch-releases</id> - <url>https://artifacts.elastic.co/maven</url> - <releases> - <enabled>true</enabled> - </releases> - <snapshots> - <enabled>false</enabled> - </snapshots> - </repository> - ... - </repositories> - ...
- - <dependencies> - <!-- add the x-pack jar as a dependency --> - <dependency> - <groupId>org.elasticsearch.client</groupId> - <artifactId>x-pack-transport</artifactId> - <version>{version}</version> - </dependency> - ... - </dependencies> - ... - </project> --------------------------------------------------- -If you use Gradle, add the dependencies to `build.gradle`: - -["source","groovy",subs="attributes,callouts"] -------------------------------------------------------------- -repositories { - /* ... Any other repositories ... */ - - // Add the Elasticsearch Maven Repository - maven { - name "elastic" - url "https://artifacts.elastic.co/maven" - } -} - -dependencies { - // Provide the x-pack jar on the classpath for compilation and at runtime - compile "org.elasticsearch.client:x-pack-transport:{version}" - - /* ... */ -} -------------------------------------------------------------- - -You can also download the https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[X-Pack Transport JAR] -manually, directly from our Maven repository. - -[float] -=== Obtaining the `WatcherClient` - -To obtain an instance of the `WatcherClient` you first need to create the -`XPackClient`. The `XPackClient` is a wrapper around the standard Java -Elasticsearch `Client`: - -[source,java] -------------------------------------------------- -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.core.watcher.client.WatcherClient; -... - -TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() - .put("cluster.name", "myClusterName") - ... - .build()) - .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)); - -XPackClient xpackClient = new XPackClient(client); -WatcherClient watcherClient = xpackClient.watcher(); -------------------------------------------------- - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/put-watch.asciidoc include::java/put-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/get-watch.asciidoc include::java/get-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/delete-watch.asciidoc include::java/delete-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/execute-watch.asciidoc include::java/execute-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/ack-watch.asciidoc include::java/ack-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/activate-watch.asciidoc include::java/activate-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc include::java/deactivate-watch.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/stats.asciidoc include::java/stats.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/service.asciidoc include::java/service.asciidoc[] diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 3eda554a84bd4..c74e39f017a3c 100644 ---
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -133,7 +133,6 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E private final SetOnce<CcrRestoreSourceService> restoreSourceService = new SetOnce<>(); private final SetOnce<CcrSettings> ccrSettings = new SetOnce<>(); private Client client; - private final boolean transportClientMode; /** * Construct an instance of the CCR container with the specified settings. @@ -155,7 +154,6 @@ public Ccr(final Settings settings) { this.settings = settings; this.enabled = CCR_ENABLED_SETTING.get(settings); this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override @@ -340,10 +338,6 @@ public void onIndexModule(IndexModule indexModule) { @Override public Collection<Module> createGuiceModules() { - if (transportClientMode) { - return Collections.emptyList(); - } - return Collections.singleton(b -> XPackPlugin.bindFeatureSet(b, CCRFeatureSet.class)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a145569898ee6..6b457ae2fda9e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -13,12 +12,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.license.DeleteLicenseAction; import org.elasticsearch.license.GetBasicStatusAction; import org.elasticsearch.license.GetLicenseAction; @@ -34,8 +30,6 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; @@ -147,8 +141,6 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.SecuritySettings; import org.elasticsearch.xpack.core.security.action.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyAction; @@ -178,9 +170,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import
org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; -import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; import org.elasticsearch.xpack.core.sql.SqlFeatureSetUsage; -import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; @@ -197,12 +187,10 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Optional; -import java.util.function.Supplier; +// TODO: merge this into XPackPlugin public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin { static Optional<String> X_PACK_FEATURE = Optional.of("x-pack"); @@ -235,22 +223,6 @@ public List<Setting<?>> getSettings() { return settings; } - @Override - public Settings additionalSettings() { - return additionalSettings(settings, XPackSettings.SECURITY_ENABLED.get(settings), XPackPlugin.transportClientMode(settings)); - } - - static Settings additionalSettings(final Settings settings, final boolean enabled, final boolean transportClientMode) { - if (enabled && transportClientMode) { - return Settings.builder() - .put(SecuritySettings.addTransportSettings(settings)) - .put(SecuritySettings.addUserSettings(settings)) - .build(); - } else { - return Settings.EMPTY; - } - } - @Override public List<Action<? extends ActionResponse>> getClientActions() { return Arrays.asList( @@ -505,27 +477,4 @@ public List<NamedXContentRegistry.Entry> getNamedXContent() { DataFrameTransformState::fromXContent) ); } - - @Override - public Map<String, Supplier<Transport>> getTransports( - final Settings settings, - final ThreadPool threadPool, - final PageCacheRecycler pageCacheRecycler, - final CircuitBreakerService circuitBreakerService, - final NamedWriteableRegistry namedWriteableRegistry, - final NetworkService networkService) { - // this should only be used in the transport layer, so do not add it if it is not in transport mode or we are disabled - if (XPackPlugin.transportClientMode(settings) == false || XPackSettings.SECURITY_ENABLED.get(settings) == false) { - return Collections.emptyMap(); - } - final SSLService sslService; - try { - sslService = new SSLService(settings, null); - } catch (Exception e) { - throw new RuntimeException(e); - } - return Collections.singletonMap(SecurityField.NAME4, () -> new SecurityNetty4Transport(settings, Version.CURRENT, threadPool, - networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, sslService)); - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 2038b35b4e6e0..ababc3c21289a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; @@ -125,7 +124,6 @@ public Void
run() { protected final Settings settings; //private final Environment env; - protected boolean transportClientMode; protected final Licensing licensing; // These should not be directly accessed as they cannot be overridden in tests. Please use the getters so they can be overridden. private static final SetOnce<XPackLicenseState> licenseState = new SetOnce<>(); @@ -137,8 +135,7 @@ public XPackPlugin( final Path configPath) { super(settings); this.settings = settings; - this.transportClientMode = transportClientMode(settings); - Environment env = transportClientMode ? null : new Environment(settings, configPath); + Environment env = new Environment(settings, configPath); setSslService(new SSLService(settings, env)); setLicenseState(new XPackLicenseState(settings)); @@ -222,12 +219,7 @@ public Settings additionalSettings() { if (settings.get(xpackInstalledNodeAttrSetting) != null) { throw new IllegalArgumentException("Directly setting [" + xpackInstalledNodeAttrSetting + "] is not permitted"); } - - if (transportClientMode) { - return super.additionalSettings(); - } else { - return Settings.builder().put(super.additionalSettings()).put(xpackInstalledNodeAttrSetting, "true").build(); - } + return Settings.builder().put(super.additionalSettings()).put(xpackInstalledNodeAttrSetting, "true").build(); } @Override @@ -236,10 +228,6 @@ public Collection<Module> createGuiceModules() { //modules.add(b -> b.bind(Clock.class).toInstance(getClock())); // used to get core up and running, we do not bind the actual feature set here modules.add(b -> XPackPlugin.createFeatureSetMultiBinder(b, EmptyXPackFeatureSet.class)); - - if (transportClientMode) { - modules.add(b -> b.bind(XPackLicenseState.class).toProvider(Providers.of(null))); - } return modules; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java index 9696ca6e7fde7..2d4991d514027 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java @@ -22,6 +22,7 @@ import java.util.Collection; import java.util.concurrent.CountDownLatch; +@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class AbstractLicensesIntegrationTestCase extends ESIntegTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java index 1b7d889d7262a..1f09f959883f3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java @@ -25,7 +25,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE) +@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) public class StartBasicLicenseTests extends AbstractLicensesIntegrationTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java index ca1c361a5b99f..eac145dd0ffa8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java +++
b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java @@ -24,7 +24,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE) +@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java index 81be978d33103..b03f51d1d195b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -54,7 +54,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -@ESIntegTestCase.ClusterScope(numDataNodes = 0) +@ESIntegTestCase.ClusterScope(numDataNodes = 0, transportClientRatio = 0.0) public class SourceOnlySnapshotIT extends ESIntegTestCase { @Override diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index 3a6ab2e5b71d2..122cd570ab108 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -7,16 +7,36 @@ package org.elasticsearch.xpack.dataframe.integration; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DestConfig; +import org.elasticsearch.client.dataframe.transforms.QueryConfig; +import org.elasticsearch.client.dataframe.transforms.SourceConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; +import 
org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource; +import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; +import org.elasticsearch.client.dataframe.transforms.pivot.SingleGroupSource; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -26,36 +46,15 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.QueryConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.net.URISyntaxException; -import java.nio.file.Path; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.time.ZoneId; -import java.util.Arrays; -import java.util.Collection; +import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -64,18 +63,18 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.Is.is; -abstract class DataFrameIntegTestCase extends ESIntegTestCase { +abstract class DataFrameIntegTestCase extends ESRestTestCase { protected static final String REVIEWS_INDEX_NAME = "data_frame_reviews"; private Map<String, DataFrameTransformConfig>
transformConfigs = new HashMap<>(); - protected void cleanUp() { + protected void cleanUp() throws IOException { cleanUpTransforms(); waitForPendingTasks(); } - protected void cleanUpTransforms() { + protected void cleanUpTransforms() throws IOException { for (DataFrameTransformConfig config : transformConfigs.values()) { stopDataFrameTransform(config.getId()); deleteDataFrameTransform(config.getId()); @@ -83,41 +82,42 @@ protected void cleanUpTransforms() { transformConfigs.clear(); } - protected StopDataFrameTransformAction.Response stopDataFrameTransform(String id) { - return client().execute(StopDataFrameTransformAction.INSTANCE, - new StopDataFrameTransformAction.Request(id, true, false, null)).actionGet(); + protected StopDataFrameTransformResponse stopDataFrameTransform(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(id, true, null), RequestOptions.DEFAULT); } - protected StartDataFrameTransformAction.Response startDataFrameTransform(String id) { - return client().execute(StartDataFrameTransformAction.INSTANCE, - new StartDataFrameTransformAction.Request(id, false)).actionGet(); + protected StartDataFrameTransformResponse startDataFrameTransform(String id, RequestOptions options) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().startDataFrameTransform(new StartDataFrameTransformRequest(id), options); } - protected AcknowledgedResponse deleteDataFrameTransform(String id) { - AcknowledgedResponse response = client().execute(DeleteDataFrameTransformAction.INSTANCE, - new DeleteDataFrameTransformAction.Request(id)) - .actionGet(); + protected AcknowledgedResponse deleteDataFrameTransform(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + AcknowledgedResponse response = + restClient.dataFrame().deleteDataFrameTransform(new DeleteDataFrameTransformRequest(id), RequestOptions.DEFAULT); if (response.isAcknowledged()) { transformConfigs.remove(id); } return response; } - protected AcknowledgedResponse putDataFrameTransform(DataFrameTransformConfig config) { + protected AcknowledgedResponse putDataFrameTransform(DataFrameTransformConfig config, RequestOptions options) throws IOException { if (transformConfigs.keySet().contains(config.getId())) { throw new IllegalArgumentException("data frame transform [" + config.getId() + "] is already registered"); } - AcknowledgedResponse response = client().execute(PutDataFrameTransformAction.INSTANCE, - new PutDataFrameTransformAction.Request(config)) - .actionGet(); + RestHighLevelClient restClient = new TestRestHighLevelClient(); + AcknowledgedResponse response = + restClient.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(config), options); if (response.isAcknowledged()) { transformConfigs.put(config.getId(), config); } return response; } - protected GetDataFrameTransformsStatsAction.Response getDataFrameTransformStats(String id) { - return client().execute(GetDataFrameTransformsStatsAction.INSTANCE, new GetDataFrameTransformsStatsAction.Request(id)).actionGet(); + protected GetDataFrameTransformStatsResponse getDataFrameTransformStats(String id) throws IOException { + RestHighLevelClient restClient = new TestRestHighLevelClient(); + return restClient.dataFrame().getDataFrameTransformStats(new GetDataFrameTransformStatsRequest(id), RequestOptions.DEFAULT); } protected void 
waitUntilCheckpoint(String id, long checkpoint) throws Exception { @@ -136,38 +136,40 @@ protected void waitUntilCheckpoint(String id, long checkpoint, TimeValue waitTim } protected DateHistogramGroupSource createDateHistogramGroupSource(String field, long interval, ZoneId zone, String format) { - DateHistogramGroupSource source = new DateHistogramGroupSource(field); - source.setFormat(format); - source.setInterval(interval); - source.setTimeZone(zone); - return source; + DateHistogramGroupSource.Builder builder = DateHistogramGroupSource.builder() + .setField(field) + .setFormat(format) + .setInterval(interval) + .setTimeZone(zone); + return builder.build(); } protected DateHistogramGroupSource createDateHistogramGroupSource(String field, DateHistogramInterval interval, ZoneId zone, String format) { - DateHistogramGroupSource source = new DateHistogramGroupSource(field); - source.setFormat(format); - source.setDateHistogramInterval(interval); - source.setTimeZone(zone); - return source; + DateHistogramGroupSource.Builder builder = DateHistogramGroupSource.builder() + .setField(field) + .setFormat(format) + .setDateHistgramInterval(interval) + .setTimeZone(zone); + return builder.build(); } protected GroupConfig createGroupConfig(Map<String, SingleGroupSource> groups) throws Exception { - Map<String, Object> lazyParsed = new HashMap<>(groups.size()); - for(Map.Entry<String, SingleGroupSource> sgs : groups.entrySet()) { - lazyParsed.put(sgs.getKey(), Collections.singletonMap(sgs.getValue().getType().value(), toLazy(sgs.getValue()))); + GroupConfig.Builder builder = GroupConfig.builder(); + for (Map.Entry<String, SingleGroupSource> sgs : groups.entrySet()) { + builder.groupBy(sgs.getKey(), sgs.getValue()); } - return new GroupConfig(lazyParsed, groups); + return builder.build(); } protected QueryConfig createQueryConfig(QueryBuilder queryBuilder) throws Exception { - return new QueryConfig(toLazy(queryBuilder), queryBuilder); + return new QueryConfig(queryBuilder); } protected AggregationConfig createAggConfig(AggregatorFactories.Builder aggregations) throws Exception { - return new AggregationConfig(toLazy(aggregations), aggregations); + return new AggregationConfig(aggregations); } protected PivotConfig createPivotConfig(Map<String, SingleGroupSource> groups, @@ -178,7 +180,11 @@ protected PivotConfig createPivotConfig(Map<String, SingleGroupSource> groups, AggregatorFactories.Builder aggregations, Integer size) throws Exception { - return new PivotConfig(createGroupConfig(groups), createAggConfig(aggregations), size); + PivotConfig.Builder builder = PivotConfig.builder() + .setGroups(createGroupConfig(groups)) + .setAggregationConfig(createAggConfig(aggregations)) + .setMaxPageSearchSize(size); + return builder.build(); } protected DataFrameTransformConfig createTransformConfig(String id, @@ -195,16 +201,18 @@ protected DataFrameTransformConfig createTransformConfig(String id, String destinationIndex, QueryBuilder queryBuilder, String...
sourceIndices) throws Exception { - return new DataFrameTransformConfig(id, - new SourceConfig(sourceIndices, createQueryConfig(queryBuilder)), - new DestConfig(destinationIndex), - Collections.emptyMap(), - createPivotConfig(groups, aggregations), - "Test data frame transform config id: " + id); + return DataFrameTransformConfig.builder() + .setId(id) + .setSource(SourceConfig.builder().setIndex(sourceIndices).setQueryConfig(createQueryConfig(queryBuilder)).build()) + .setDest(new DestConfig(destinationIndex)) + .setPivotConfig(createPivotConfig(groups, aggregations)) + .setDescription("Test data frame transform config id: " + id) + .build(); } protected void createReviewsIndex() throws Exception { final int numDocs = 1000; + RestHighLevelClient restClient = new TestRestHighLevelClient(); // create mapping try (XContentBuilder builder = jsonBuilder()) { @@ -229,16 +237,13 @@ protected void createReviewsIndex() throws Exception { .endObject(); } builder.endObject(); - CreateIndexResponse response = client().admin() - .indices() - .prepareCreate(REVIEWS_INDEX_NAME) - .addMapping("_doc", builder) - .get(); + CreateIndexResponse response = + restClient.indices().create(new CreateIndexRequest(REVIEWS_INDEX_NAME).mapping(builder), RequestOptions.DEFAULT); assertThat(response.isAcknowledged(), is(true)); } // create index - BulkRequestBuilder bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + BulkRequest bulk = new BulkRequest(REVIEWS_INDEX_NAME); int day = 10; for (int i = 0; i < numDocs; i++) { long user = i % 28; @@ -267,15 +272,15 @@ protected void createReviewsIndex() throws Exception { bulk.add(new IndexRequest().source(sourceBuilder.toString(), XContentType.JSON)); if (i % 50 == 0) { - BulkResponse response = client().bulk(bulk.request()).get(); + BulkResponse response = restClient.bulk(bulk, RequestOptions.DEFAULT); assertThat(response.buildFailureMessage(), response.hasFailures(), is(false)); - bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + bulk = new BulkRequest(REVIEWS_INDEX_NAME); day += 1; } } - BulkResponse response = client().bulk(bulk.request()).get(); + BulkResponse response = restClient.bulk(bulk, RequestOptions.DEFAULT); assertThat(response.buildFailureMessage(), response.hasFailures(), is(false)); - client().admin().indices().prepareRefresh(REVIEWS_INDEX_NAME).get(); + restClient.indices().refresh(new RefreshRequest(REVIEWS_INDEX_NAME), RequestOptions.DEFAULT); } protected Map toLazy(ToXContent parsedObject) throws Exception { @@ -293,8 +298,9 @@ private void waitForPendingTasks() { listTasksRequest.setWaitForCompletion(true); listTasksRequest.setDetailed(true); listTasksRequest.setTimeout(TimeValue.timeValueSeconds(10)); + RestHighLevelClient restClient = new TestRestHighLevelClient(); try { - admin().cluster().listTasks(listTasksRequest).get(); + restClient.tasks().list(listTasksRequest, RequestOptions.DEFAULT); } catch (Exception e) { throw new AssertionError("Failed to wait for pending tasks to complete", e); } @@ -307,33 +313,17 @@ protected NamedXContentRegistry xContentRegistry() { } @Override - protected Settings externalClusterClientSettings() { - Path key; - Path certificate; - try { - key = PathUtils.get(getClass().getResource("/testnode.pem").toURI()); - certificate = PathUtils.get(getClass().getResource("/testnode.crt").toURI()); - } catch (URISyntaxException e) { - throw new IllegalStateException("error trying to get keystore path", e); - } - Settings.Builder builder = Settings.builder(); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, 
SecurityField.NAME4); - builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - builder.put("xpack.security.transport.ssl.enabled", true); - builder.put("xpack.security.transport.ssl.key", key.toAbsolutePath().toString()); - builder.put("xpack.security.transport.ssl.certificate", certificate.toAbsolutePath().toString()); - builder.put("xpack.security.transport.ssl.key_passphrase", "testnode"); - builder.put("xpack.security.transport.ssl.verification_mode", "certificate"); - return builder.build(); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); + protected Settings restClientSettings() { + final String token = "Basic " + + Base64.getEncoder().encodeToString(("x_pack_rest_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index cc2e8c4436e06..363218d1b0f14 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -6,16 +6,18 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.client.dataframe.transforms.pivot.SingleGroupSource; +import org.elasticsearch.client.dataframe.transforms.pivot.TermsGroupSource; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.TermsGroupSource; -import org.elasticsearch.xpack.core.indexing.IndexerState; import org.junit.After; +import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -24,7 +26,7 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { @After - public void cleanTransforms() { + public void cleanTransforms() throws IOException { cleanUp(); } @@ -34,8 +36,8 @@ public void testDataFrameTransformCrud() throws Exception { Map groups = new HashMap<>(); groups.put("by-day", createDateHistogramGroupSource("timestamp", DateHistogramInterval.DAY, null, null)); - groups.put("by-user", new 
TermsGroupSource("user_id")); - groups.put("by-business", new TermsGroupSource("business_id")); + groups.put("by-user", TermsGroupSource.builder().setField("user_id").build()); + groups.put("by-business", TermsGroupSource.builder().setField("business_id").build()); AggregatorFactories.Builder aggs = AggregatorFactories.builder() .addAggregator(AggregationBuilders.avg("review_score").field("stars")) @@ -47,8 +49,10 @@ public void testDataFrameTransformCrud() throws Exception { "reviews-by-user-business-day", REVIEWS_INDEX_NAME); - assertTrue(putDataFrameTransform(config).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId()).isStarted()); + final RequestOptions options = + expectWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + assertTrue(putDataFrameTransform(config, options).isAcknowledged()); + assertTrue(startDataFrameTransform(config.getId(), options).isStarted()); waitUntilCheckpoint(config.getId(), 1L); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index 7d0fb179a2228..d6ef3cc641be2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -7,24 +7,23 @@ package org.elasticsearch.xpack.dataframe.integration; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; @@ -34,11 
+33,10 @@ import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.HistogramGroupSource; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.dataframe.transforms.TransformProgressGatherer; -import java.util.Arrays; -import java.util.Collection; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -47,10 +45,11 @@ import static org.hamcrest.Matchers.is; @LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") -public class DataFrameTransformProgressIT extends ESIntegTestCase { +public class DataFrameTransformProgressIT extends ESRestTestCase { protected void createReviewsIndex() throws Exception { final int numDocs = 1000; + final RestHighLevelClient restClient = new TestRestHighLevelClient(); // create mapping try (XContentBuilder builder = jsonBuilder()) { @@ -75,16 +74,13 @@ protected void createReviewsIndex() throws Exception { .endObject(); } builder.endObject(); - CreateIndexResponse response = client().admin() - .indices() - .prepareCreate(REVIEWS_INDEX_NAME) - .addMapping("_doc", builder) - .get(); + CreateIndexResponse response = restClient.indices() + .create(new CreateIndexRequest(REVIEWS_INDEX_NAME).mapping(builder), RequestOptions.DEFAULT); assertThat(response.isAcknowledged(), is(true)); } // create index - BulkRequestBuilder bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + BulkRequest bulk = new BulkRequest(REVIEWS_INDEX_NAME); int day = 10; for (int i = 0; i < numDocs; i++) { long user = i % 28; @@ -113,14 +109,14 @@ protected void createReviewsIndex() throws Exception { bulk.add(new IndexRequest().source(sourceBuilder.toString(), XContentType.JSON)); if (i % 50 == 0) { - BulkResponse response = client().bulk(bulk.request()).get(); + BulkResponse response = restClient.bulk(bulk, RequestOptions.DEFAULT); assertThat(response.buildFailureMessage(), response.hasFailures(), is(false)); - bulk = client().prepareBulk(REVIEWS_INDEX_NAME, "_doc"); + bulk = new BulkRequest(REVIEWS_INDEX_NAME); day += 1; } } - client().bulk(bulk.request()).get(); - client().admin().indices().prepareRefresh(REVIEWS_INDEX_NAME).get(); + restClient.bulk(bulk, RequestOptions.DEFAULT); + restClient.indices().refresh(new RefreshRequest(REVIEWS_INDEX_NAME), RequestOptions.DEFAULT); } public void testGetProgress() throws Exception { @@ -140,10 +136,11 @@ public void testGetProgress() throws Exception { pivotConfig, null); - PlainActionFuture progressFuture = new PlainActionFuture<>(); - TransformProgressGatherer.getInitialProgress(client(), config, progressFuture); + final RestHighLevelClient restClient = new TestRestHighLevelClient(); + SearchResponse response = restClient.search(TransformProgressGatherer.getSearchRequest(config), RequestOptions.DEFAULT); - DataFrameTransformProgress progress = progressFuture.get(); + DataFrameTransformProgress progress = + TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(1000L)); assertThat(progress.getRemainingDocs(), equalTo(1000L)); @@ -160,34 +157,28 @@ public void testGetProgress() throws Exception { pivotConfig, null); - - progressFuture = new PlainActionFuture<>(); - - 
TransformProgressGatherer.getInitialProgress(client(), config, progressFuture); - progress = progressFuture.get(); + response = restClient.search(TransformProgressGatherer.getSearchRequest(config), RequestOptions.DEFAULT); + progress = TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(35L)); assertThat(progress.getRemainingDocs(), equalTo(35L)); assertThat(progress.getPercentComplete(), equalTo(0.0)); - client().admin().indices().prepareDelete(REVIEWS_INDEX_NAME).get(); + deleteIndex(REVIEWS_INDEX_NAME); } @Override - protected Settings externalClusterClientSettings() { - Settings.Builder builder = Settings.builder(); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); - builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - return builder.build(); + protected Settings restClientSettings() { + final String token = "Basic " + + Base64.getEncoder().encodeToString(("x_pack_rest_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); } - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, Collections.emptyList()); + } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index 34343e5fe8820..e8206311c012b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -92,7 +92,6 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu private final boolean enabled; private final Settings settings; - private final boolean transportClientMode; private final SetOnce dataFrameTransformsConfigManager = new SetOnce<>(); private final SetOnce dataFrameAuditor = new SetOnce<>(); private final SetOnce dataFrameTransformsCheckpointService = new SetOnce<>(); @@ -100,19 +99,12 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu public DataFrame(Settings settings) { this.settings = settings; - this.enabled = XPackSettings.DATA_FRAME_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - modules.add(b -> XPackPlugin.bindFeatureSet(b, DataFrameFeatureSet.class)); return modules; } @@ -159,7 +151,7 @@ public List getRestHandlers(final Settings settings, final RestCont @Override public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { + if (false == enabled) { return emptyList(); } @@ -173,7 +165,7 @@ public List> getExecutorBuilders(Settings settings) { public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, ResourceWatcherService resourceWatcherService, ScriptService scriptService, 
NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } dataFrameAuditor.set(new DataFrameAuditor(client, clusterService.getNodeName())); @@ -203,7 +195,7 @@ public UnaryOperator> getIndexTemplateMetaDat @Override public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java index 23168627d442e..18a341e217294 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java @@ -11,10 +11,13 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import java.util.function.Function; + /** * Utility class to gather the progress information for a given config and its cursor position */ @@ -29,17 +32,10 @@ public final class TransformProgressGatherer { public static void getInitialProgress(Client client, DataFrameTransformConfig config, ActionListener progressListener) { - SearchRequest request = client.prepareSearch(config.getSource().getIndex()) - .setSize(0) - .setAllowPartialSearchResults(false) - .setTrackTotalHits(true) - .setQuery(config.getSource().getQueryConfig().getQuery()) - .request(); + SearchRequest request = getSearchRequest(config); ActionListener searchResponseActionListener = ActionListener.wrap( - searchResponse -> { - progressListener.onResponse(new DataFrameTransformProgress(searchResponse.getHits().getTotalHits().value, null)); - }, + searchResponse -> progressListener.onResponse(searchResponseToDataFrameTransformProgressFunction().apply(searchResponse)), progressListener::onFailure ); ClientHelper.executeWithHeadersAsync(config.getHeaders(), @@ -50,4 +46,17 @@ public static void getInitialProgress(Client client, searchResponseActionListener); } + public static SearchRequest getSearchRequest(DataFrameTransformConfig config) { + SearchRequest request = new SearchRequest(config.getSource().getIndex()); + request.allowPartialSearchResults(false); + request.source(new SearchSourceBuilder() + .size(0) + .trackTotalHits(true) + .query(config.getSource().getQueryConfig().getQuery())); + return request; + } + + public static Function searchResponseToDataFrameTransformProgressFunction() { + return searchResponse -> new DataFrameTransformProgress(searchResponse.getHits().getTotalHits().value, null); + } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java index 2e7d2fbbc555d..0b6c6001ca0ef 
100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java @@ -93,12 +93,10 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { private final SetOnce indexLifecycleInitialisationService = new SetOnce<>(); private Settings settings; private boolean enabled; - private boolean transportClientMode; public IndexLifecycle(Settings settings) { this.settings = settings; this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } // overridable by tests @@ -108,13 +106,7 @@ protected Clock getClock() { public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexLifecycleFeatureSet.class)); - return modules; } @@ -132,7 +124,7 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } indexLifecycleInitialisationService.set(new IndexLifecycleService(settings, client, clusterService, threadPool, diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java index a1a37beb1d129..673c10f885447 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -77,7 +77,7 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNull.nullValue; -@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0) +@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) public class IndexLifecycleInitialisationTests extends ESIntegTestCase { private Settings settings; private LifecyclePolicy lifecyclePolicy; @@ -109,29 +109,11 @@ protected boolean ignoreExternalCluster() { return true; } - @Override - protected Settings transportClientSettings() { - Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); - settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); - settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); - settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); - settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); - settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); - settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); - return settings.build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Before public void init() { settings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) diff --git 
a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index d3ca4dd0098b7..9de1c2f56d1fd 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -36,21 +36,15 @@ public class Logstash extends Plugin implements ActionPlugin { Pattern.quote("${logstash.template.version}"); private final boolean enabled; - private final boolean transportClientMode; public Logstash(Settings settings) { this.enabled = XPackSettings.LOGSTASH_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } boolean isEnabled() { return enabled; } - boolean isTransportClient() { - return transportClientMode; - } - public Collection createGuiceModules() { List modules = new ArrayList<>(); modules.add(b -> { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 89b2ec81f87ef..128bcce67994b 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -16,7 +16,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.plugins.Plugin; @@ -24,9 +26,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -35,9 +36,12 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.URISyntaxException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; @@ -45,9 +49,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static 
org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile;
 
 /**
  * Base class of ML integration tests that use a native autodetect process
@@ -62,16 +69,34 @@ protected NamedXContentRegistry xContentRegistry() {
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class);
+        return Arrays.asList(LocalStateMachineLearning.class, Netty4Plugin.class, ReindexPlugin.class);
     }
 
     @Override
-    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
-        return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class, ReindexPlugin.class);
+    protected Function<Client, Client> getClientWrapper() {
+        final Map<String, String> headers =
+            Map.of("Authorization", basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING));
+        // we need to wrap node clients because we do not specify a user for nodes and all requests will use the system
+        // user. This is ok for internal n2n stuff but the test framework does other things like wiping indices, repositories, etc
+        // that the system user cannot do. so we wrap the node client with a user that can do these things since the client() calls
+        // always return node clients now that the transport client is gone from these tests. Without the wrapper there
+        // would be no way to attach a default user to those calls (the old xpack.security.user setting only applied to
+        // the transport client), so the cleanup logic would run as the system user and be rejected by the security
+        // plugin
+        return client -> client.filterWithHeader(headers);
     }
 
-    @Override
     protected Settings externalClusterClientSettings() {
+        final Path home = createTempDir();
+        final Path xpackConf = home.resolve("config");
+        try {
+            Files.createDirectories(xpackConf);
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+        writeFile(xpackConf, "users", "x_pack_rest_user" + ":" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING + "\n");
+        writeFile(xpackConf, "users_roles", "superuser:x_pack_rest_user\n");
+
         Path key;
         Path certificate;
         try {
@@ -80,10 +105,17 @@ protected Settings externalClusterClientSettings() {
         } catch (URISyntaxException e) {
             throw new IllegalStateException("error trying to get keystore path", e);
         }
+
         Settings.Builder builder = Settings.builder();
+        builder.put("node.ml", false);
         builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4);
-        builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING);
         builder.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), true);
+        builder.put(XPackSettings.SECURITY_ENABLED.getKey(), true);
+        builder.put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false);
+        builder.put(XPackSettings.WATCHER_ENABLED.getKey(), false);
+        builder.put(XPackSettings.MONITORING_ENABLED.getKey(), false);
+        builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial");
+        builder.put(Environment.PATH_HOME_SETTING.getKey(), home);
         builder.put("xpack.security.transport.ssl.enabled", true);
         builder.put("xpack.security.transport.ssl.key", key.toAbsolutePath().toString());
         builder.put("xpack.security.transport.ssl.certificate", certificate.toAbsolutePath().toString());
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index f679170bc673d..42e945ffec8fd 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -305,7 +305,6 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu private final Settings settings; private final Environment env; private final boolean enabled; - private final boolean transportClientMode; private final SetOnce autodetectProcessManager = new SetOnce<>(); private final SetOnce datafeedManager = new SetOnce<>(); @@ -314,8 +313,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public MachineLearning(Settings settings, Path configPath) { this.settings = settings; this.enabled = XPackSettings.MACHINE_LEARNING_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); - this.env = transportClientMode ? null : new Environment(settings, configPath); + this.env = new Environment(settings, configPath); } protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } @@ -349,7 +347,7 @@ public Settings additionalSettings() { String maxOpenJobsPerNodeNodeAttrName = "node.attr." + MAX_OPEN_JOBS_NODE_ATTR; String machineMemoryAttrName = "node.attr." + MACHINE_MEMORY_NODE_ATTR; - if (enabled == false || transportClientMode) { + if (enabled == false) { disallowMlNodeAttributes(mlEnabledNodeAttrName, maxOpenJobsPerNodeNodeAttrName, machineMemoryAttrName); return Settings.EMPTY; } @@ -405,7 +403,7 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { + if (enabled == false) { // special holder for @link(MachineLearningFeatureSetUsage) which needs access to job manager, empty if ML is disabled return Collections.singletonList(new JobManagerHolder()); } @@ -506,7 +504,7 @@ public List> getPersistentTasksExecutor(ClusterServic ThreadPool threadPool, Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return emptyList(); } @@ -519,15 +517,9 @@ public List> getPersistentTasksExecutor(ClusterServic public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - modules.add(b -> { XPackPlugin.bindFeatureSet(b, MachineLearningFeatureSet.class); }); - return modules; } @@ -650,7 +642,7 @@ public List getRestHandlers(Settings settings, RestController restC @Override public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { + if (false == enabled) { return emptyList(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index bcfab50c21e00..2a5b4369cdf9a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -22,7 +22,6 @@ import org.elasticsearch.plugins.Platforms; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; 
import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; @@ -76,7 +75,7 @@ public MachineLearningFeatureSet(Environment environment, ClusterService cluster // Don't try to get the native code version if ML is disabled - it causes too much controversy // if ML has been disabled because of some OS incompatibility. Also don't try to get the native // code version in the transport client - the controller process won't be running. - if (enabled && XPackPlugin.transportClientMode(environment.settings()) == false) { + if (enabled) { try { if (isRunningOnMlPlatform(true)) { NativeController nativeController = NativeControllerHolder.getNativeController(clusterService.getNodeName(), diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index e2b137bd95d19..0472d909d0fa9 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -83,11 +83,9 @@ public class Monitoring extends Plugin implements ActionPlugin { protected final Settings settings; private final boolean enabled; - private final boolean transportClientMode; public Monitoring(Settings settings) { this.settings = settings; - this.transportClientMode = XPackPlugin.transportClientMode(settings); this.enabled = XPackSettings.MONITORING_ENABLED.get(settings); } @@ -100,16 +98,12 @@ boolean isEnabled() { return enabled; } - boolean isTransportClient() { - return transportClientMode; - } - @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); modules.add(b -> { XPackPlugin.bindFeatureSet(b, MonitoringFeatureSet.class); - if (transportClientMode || enabled == false) { + if (enabled == false) { b.bind(MonitoringService.class).toProvider(Providers.of(null)); b.bind(Exporters.class).toProvider(Providers.of(null)); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java index 6fb967c79782e..b4dc9b3112ece 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringPluginClientTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.monitoring; import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -14,17 +13,6 @@ public class MonitoringPluginClientTests extends ESTestCase { - public void testModulesWithClientSettings() throws Exception { - Settings settings = Settings.builder() - .put("path.home", createTempDir()) - .put(Client.CLIENT_TYPE_SETTING_S.getKey(), TransportClient.CLIENT_TYPE) - .build(); - - Monitoring plugin = new Monitoring(settings); - assertThat(plugin.isEnabled(), is(true)); - assertThat(plugin.isTransportClient(), is(true)); - } - public void testModulesWithNodeSettings() throws Exception { // these settings mimic what ES does when running as a node... 
Settings settings = Settings.builder() @@ -33,6 +21,5 @@ public void testModulesWithNodeSettings() throws Exception { .build(); Monitoring plugin = new Monitoring(settings); assertThat(plugin.isEnabled(), is(true)); - assertThat(plugin.isTransportClient(), is(false)); } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index 8ebbf1bccf864..faa713efb7d11 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -108,12 +108,10 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin private final SetOnce schedulerEngine = new SetOnce<>(); private final Settings settings; private final boolean enabled; - private final boolean transportClientMode; public Rollup(Settings settings) { this.settings = settings; this.enabled = XPackSettings.ROLLUP_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override @@ -127,10 +125,6 @@ public Collection createComponents(Client client, ClusterService cluster @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } modules.add(b -> XPackPlugin.bindFeatureSet(b, RollupFeatureSet.class)); return modules; } @@ -178,7 +172,7 @@ public List getRestHandlers(Settings settings, RestController restC @Override public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { + if (false == enabled) { return emptyList(); } @@ -193,7 +187,7 @@ public List> getPersistentTasksExecutor(ClusterServic ThreadPool threadPool, Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode ) { + if (enabled == false) { return emptyList(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index a6218522fb7e5..c7ada6e79a9ac 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -270,7 +270,6 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw private final Settings settings; private final Environment env; private final boolean enabled; - private final boolean transportClientMode; /* what a PITA that we need an extra indirection to initialize this. Yet, once we got rid of guice we can thing about how * to fix this or make it simpler. Today we need several service that are created in createComponents but we need to register * an instance of TransportInterceptor way earlier before createComponents is called. */ @@ -293,10 +292,9 @@ public Security(Settings settings, final Path configPath) { Security(Settings settings, final Path configPath, List extensions) { this.settings = settings; - this.transportClientMode = XPackPlugin.transportClientMode(settings); - this.env = transportClientMode ? 
null : new Environment(settings, configPath); + this.env = new Environment(settings, configPath); this.enabled = XPackSettings.SECURITY_ENABLED.get(settings); - if (enabled && transportClientMode == false) { + if (enabled) { runStartupChecks(settings); // we load them all here otherwise we can't access secure settings since they are closed once the checks are // fetched @@ -327,24 +325,11 @@ private static void runStartupChecks(Settings settings) { @Override public Collection createGuiceModules() { List modules = new ArrayList<>(); - if (enabled == false || transportClientMode) { + if (enabled == false) { modules.add(b -> b.bind(IPFilter.class).toProvider(Providers.of(null))); } - - if (transportClientMode) { - if (enabled == false) { - return modules; - } - modules.add(b -> { - // for transport client we still must inject these ssl classes with guice - b.bind(SSLService.class).toInstance(getSslService()); - }); - - return modules; - } modules.add(b -> XPackPlugin.bindFeatureSet(b, SecurityFeatureSet.class)); - if (enabled == false) { modules.add(b -> { b.bind(Realms.class).toProvider(Providers.of(null)); // for SecurityFeatureSet @@ -568,12 +553,12 @@ private AuthenticationFailureHandler createAuthenticationFailureHandler(final Re @Override public Settings additionalSettings() { - return additionalSettings(settings, enabled, transportClientMode); + return additionalSettings(settings, enabled); } // visible for tests - static Settings additionalSettings(final Settings settings, final boolean enabled, final boolean transportClientMode) { - if (enabled && transportClientMode == false) { + static Settings additionalSettings(final Settings settings, final boolean enabled) { + if (enabled) { final Settings.Builder builder = Settings.builder(); builder.put(SecuritySettings.addTransportSettings(settings)); @@ -606,19 +591,15 @@ static Settings additionalSettings(final Settings settings, final boolean enable @Override public List> getSettings() { - return getSettings(transportClientMode, securityExtensions); + return getSettings(securityExtensions); } /** * Get the {@link Setting setting configuration} for all security components, including those defined in extensions. 
*/ - public static List> getSettings(boolean transportClientMode, List securityExtensions) { + public static List> getSettings(List securityExtensions) { List> settingsList = new ArrayList<>(); - if (transportClientMode) { - return settingsList; - } - // The following just apply in node mode settingsList.add(XPackSettings.FIPS_MODE_ENABLED); @@ -657,9 +638,6 @@ public static List> getSettings(boolean transportClientMode, List getRestHeaders() { - if (transportClientMode) { - return Collections.emptyList(); - } Set headers = new HashSet<>(); headers.add(UsernamePasswordToken.BASIC_AUTH_HEADER); if (XPackSettings.AUDIT_ENABLED.get(settings)) { @@ -773,11 +751,7 @@ public List getActionFilters() { if (enabled == false) { return emptyList(); } - // registering the security filter only for nodes - if (transportClientMode == false) { - return singletonList(securityActionFilter.get()); - } - return emptyList(); + return singletonList(securityActionFilter.get()); } @Override @@ -865,7 +839,7 @@ static void validateRealmSettings(Settings settings) { @Override public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { - if (transportClientMode || enabled == false) { // don't register anything if we are not enabled + if (enabled == false) { // don't register anything if we are not enabled // interceptors are not installed if we are running on the transport client return Collections.emptyList(); } @@ -890,7 +864,7 @@ public AsyncSender interceptSender(AsyncSender sender) { public Map> getTransports(Settings settings, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { - if (transportClientMode || enabled == false) { // don't register anything if we are not enabled, or in transport client mode + if (enabled == false) { // don't register anything if we are not enabled, or in transport client mode return Collections.emptyMap(); } @@ -944,7 +918,7 @@ public Map> getHttpTransports(Settings set @Override public UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) { - if (enabled == false || transportClientMode) { + if (enabled == false) { return null; } final boolean ssl = HTTP_SSL_ENABLED.get(settings); @@ -955,7 +929,7 @@ public UnaryOperator getRestHandlerWrapper(ThreadContext threadCont @Override public List> getExecutorBuilders(final Settings settings) { - if (enabled && transportClientMode == false) { + if (enabled) { return Collections.singletonList( new FixedExecutorBuilder(settings, TokenService.THREAD_POOL_NAME, 1, 1000, "xpack.security.authc.token.thread_pool")); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java deleted file mode 100644 index 0dfb369bc371f..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.security.support; - -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.XPackSettings; - -public abstract class AbstractSecurityModule extends AbstractModule { - - protected final Settings settings; - protected final boolean clientMode; - protected final boolean securityEnabled; - - public AbstractSecurityModule(Settings settings) { - this.settings = settings; - this.clientMode = TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())); - this.securityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - } - - @Override - protected final void configure() { - configure(clientMode); - } - - protected abstract void configure(boolean clientMode); - - public abstract static class Node extends AbstractSecurityModule { - - protected Node(Settings settings) { - super(settings); - } - - @Override - protected final void configure(boolean clientMode) { - assert !clientMode : "[" + getClass().getSimpleName() + "] is a node only module"; - configureNode(); - } - - protected abstract void configureNode(); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java index bb0036e9f870a..db5a22c5e6e9d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java @@ -42,7 +42,7 @@ public Settings nodeSettings(int nodeOrdinal) { public void testThatBulkUpdateDoesNotLoseFields() { assertEquals(DocWriteResponse.Result.CREATED, client().prepareIndex("index1", "type").setSource("{\"test\": \"test\"}", XContentType.JSON).setId("1").get().getResult()); - GetResponse getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + GetResponse getResponse = client().prepareGet("index1", "type", "1").get(); assertEquals("test", getResponse.getSource().get("test")); if (randomBoolean()) { @@ -50,9 +50,9 @@ public void testThatBulkUpdateDoesNotLoseFields() { } // update with a new field - assertEquals(DocWriteResponse.Result.UPDATED, internalCluster().transportClient().prepareUpdate("index1", "type", "1") + assertEquals(DocWriteResponse.Result.UPDATED, client().prepareUpdate("index1", "type", "1") .setDoc("{\"not test\": \"not test\"}", XContentType.JSON).get().getResult()); - getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + getResponse = client().prepareGet("index1", "type", "1").get(); assertEquals("test", getResponse.getSource().get("test")); assertEquals("not test", getResponse.getSource().get("not test")); @@ -61,10 +61,10 @@ public void testThatBulkUpdateDoesNotLoseFields() { flushAndRefresh(); // do it in a bulk - BulkResponse response = internalCluster().transportClient().prepareBulk().add(client().prepareUpdate("index1", "type", "1") + BulkResponse response = client().prepareBulk().add(client().prepareUpdate("index1", "type", "1") .setDoc("{\"bulk updated\": \"bulk updated\"}", XContentType.JSON)).get(); assertEquals(DocWriteResponse.Result.UPDATED, response.getItems()[0].getResponse().getResult()); - getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + getResponse = 
client().prepareGet("index1", "type", "1").get(); assertEquals("test", getResponse.getSource().get("test")); assertEquals("not test", getResponse.getSource().get("not test")); assertEquals("bulk updated", getResponse.getSource().get("bulk updated")); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java index 6d7eacfe26cfa..2a9f2017c50f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java @@ -74,7 +74,7 @@ protected boolean addMockHttpTransport() { } public void testModifyingViaApiClearsCache() throws Exception { - Client client = internalCluster().transportClient(); + Client client = client(); SecurityClient securityClient = securityClient(client); int modifiedRolesCount = randomIntBetween(1, roles.length); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 3055d1b0f456b..5e83ef99563d9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -64,7 +64,7 @@ import static org.hamcrest.Matchers.nullValue; // The random usage of meta fields such as _timestamp add noise to the test, so disable random index templates: -@ESIntegTestCase.ClusterScope +@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public class FieldLevelSecurityTests extends SecurityIntegTestCase { protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index da03e9ffe3d1e..2035a8b6c19dd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -125,7 +125,7 @@ public void testSingleRole() throws Exception { refresh(); - Client client = internalCluster().transportClient(); + Client client = client(); // no specifying an index, should replace indices with the permitted ones (test & test1) SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); @@ -246,7 +246,7 @@ public void testMultipleRoles() throws Exception { refresh(); - Client client = internalCluster().transportClient(); + Client client = client(); SearchResponse response = client .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java index c07491dc86314..fb85061110e08 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java @@ -72,24 +72,14 @@ protected SecureString 
nodeClientPassword() { return new SecureString("test123".toCharArray()); } - @Override - protected String transportClientUsername() { - return "admin"; - } - - @Override - protected SecureString transportClientPassword() { - return new SecureString("test123".toCharArray()); - } - public void testDifferentCombinationsOfIndices() throws Exception { - Client client = internalCluster().transportClient(); + Client client = client(); // first lets try with "admin"... all should work AcknowledgedResponse putResponse = client .filterWithHeader(Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, - basicAuthHeaderValue(transportClientUsername(), transportClientPassword()))) + basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword()))) .admin().indices().preparePutTemplate("template1") .setPatterns(Collections.singletonList("test_*")) .get(); @@ -103,7 +93,7 @@ public void testDifferentCombinationsOfIndices() throws Exception { // now lets try with "user" Map auth = Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("user", - transportClientPassword())); + nodeClientPassword())); assertThrowsAuthorizationException(client.filterWithHeader(auth).admin().indices().preparePutTemplate("template1") .setPatterns(Collections.singletonList("test_*"))::get, PutIndexTemplateAction.NAME, "user"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java index 9f86887566ac4..4c189e3e7f3da 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java @@ -93,7 +93,7 @@ public void testThatClearingAllScrollIdsWorks() throws Exception { Map headers = new HashMap<>(); headers.put(SecurityField.USER_SETTING.getKey(), user); headers.put(BASIC_AUTH_HEADER, basicAuth); - ClearScrollResponse clearScrollResponse = internalCluster().transportClient().filterWithHeader(headers) + ClearScrollResponse clearScrollResponse = client().filterWithHeader(headers) .prepareClearScroll() .addScrollId("_all").get(); assertThat(clearScrollResponse.isSucceeded(), is(true)); @@ -107,7 +107,7 @@ public void testThatClearingAllScrollIdsRequirePermissions() throws Exception { Map headers = new HashMap<>(); headers.put(SecurityField.USER_SETTING.getKey(), user); headers.put(BASIC_AUTH_HEADER, basicAuth); - assertThrows(internalCluster().transportClient().filterWithHeader(headers) + assertThrows(client().filterWithHeader(headers) .prepareClearScroll() .addScrollId("_all"), ElasticsearchSecurityException.class, "action [cluster:admin/indices/scroll/clear_all] is unauthorized for user [denied_user]"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index 87db72bcf0285..349bef3fc3152 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -18,7 +18,7 @@ /** * Integration test that uses multiple data nodes to test that the shrink index api works with security. 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java
index 9f86887566ac4..4c189e3e7f3da 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java
@@ -93,7 +93,7 @@ public void testThatClearingAllScrollIdsWorks() throws Exception {
         Map<String, String> headers = new HashMap<>();
         headers.put(SecurityField.USER_SETTING.getKey(), user);
         headers.put(BASIC_AUTH_HEADER, basicAuth);
-        ClearScrollResponse clearScrollResponse = internalCluster().transportClient().filterWithHeader(headers)
+        ClearScrollResponse clearScrollResponse = client().filterWithHeader(headers)
             .prepareClearScroll()
             .addScrollId("_all").get();
         assertThat(clearScrollResponse.isSucceeded(), is(true));
@@ -107,7 +107,7 @@ public void testThatClearingAllScrollIdsRequirePermissions() throws Exception {
         Map<String, String> headers = new HashMap<>();
         headers.put(SecurityField.USER_SETTING.getKey(), user);
         headers.put(BASIC_AUTH_HEADER, basicAuth);
-        assertThrows(internalCluster().transportClient().filterWithHeader(headers)
+        assertThrows(client().filterWithHeader(headers)
             .prepareClearScroll()
             .addScrollId("_all"), ElasticsearchSecurityException.class,
             "action [cluster:admin/indices/scroll/clear_all] is unauthorized for user [denied_user]");
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java
index 87db72bcf0285..349bef3fc3152 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java
@@ -18,7 +18,7 @@
 /**
  * Integration test that uses multiple data nodes to test that the shrink index api works with security.
  */
-@ClusterScope(minNumDataNodes = 2)
+@ClusterScope(minNumDataNodes = 2, transportClientRatio = 0.0)
 public class ShrinkIndexWithSecurityTests extends SecurityIntegTestCase {

     @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
index 671a94452fa0a..78d95ecbca0b8 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
@@ -46,7 +46,7 @@ public void stopESNativeStores() throws Exception {

         if (getCurrentClusterScope() == Scope.SUITE) {
             // Clear the realm cache for all realms since we use a SUITE scoped cluster
-            SecurityClient client = securityClient(internalCluster().transportClient());
+            SecurityClient client = securityClient(client());
             client.prepareClearRealmCache().get();
         }
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
index 462e4e26541e6..d862d248976da 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
@@ -7,7 +7,6 @@

 import io.netty.util.ThreadDeathWatcher;
 import io.netty.util.concurrent.GlobalEventExecutor;
-
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
@@ -26,7 +25,6 @@
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.network.NetworkAddress;
-import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
@@ -40,7 +38,6 @@
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.xpack.core.XPackClient;
 import org.elasticsearch.xpack.core.XPackSettings;
-import org.elasticsearch.xpack.core.security.SecurityField;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.client.SecurityClient;
@@ -77,6 +74,7 @@
  *
  * @see SecuritySettingsSource
  */
+@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0)
 public abstract class SecurityIntegTestCase extends ESIntegTestCase {

     private static SecuritySettingsSource SECURITY_DEFAULT_SETTINGS;
@@ -260,14 +258,6 @@ protected Path nodeConfigPath(int nodeOrdinal) {
         return customSecuritySettingsSource.nodeConfigPath(nodeOrdinal);
     }

-    @Override
-    protected Settings transportClientSettings() {
-        return Settings.builder().put(super.transportClientSettings())
-            .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NIO)
-            .put(customSecuritySettingsSource.transportClientSettings())
-            .build();
-    }
-
     @Override
     protected boolean addMockTransportService() {
         return false; // security has its own transport service
@@ -278,19 +268,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return customSecuritySettingsSource.nodePlugins();
     }

-    @Override
-    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
-        return customSecuritySettingsSource.transportClientPlugins();
-    }
-
-    @Override
-    protected Settings externalClusterClientSettings() {
-        return Settings.builder()
-            .put(SecurityField.USER_SETTING.getKey(), SecuritySettingsSource.TEST_USER_NAME + ":"
-                + SecuritySettingsSourceField.TEST_PASSWORD)
-            .build();
-    }
-
     /**
      * Allows to override the users config file when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to
      * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST}
@@ -333,24 +310,6 @@ protected SecureString nodeClientPassword() {
         return SECURITY_DEFAULT_SETTINGS.nodeClientPassword();
     }

-    /**
-     * Allows to override the transport client username (used while sending requests to the test cluster) when the
-     * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to
-     * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST}
-     */
-    protected String transportClientUsername() {
-        return SECURITY_DEFAULT_SETTINGS.transportClientUsername();
-    }
-
-    /**
-     * Allows to override the transport client password (used while sending requests to the test cluster) when the
-     * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to
-     * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST}
-     */
-    protected SecureString transportClientPassword() {
-        return SECURITY_DEFAULT_SETTINGS.transportClientPassword();
-    }
-
     /**
      * Allows to control whether ssl key information is auto generated or not on the transport layer
      */
@@ -392,16 +351,6 @@ protected String nodeClientUsername() {
         protected SecureString nodeClientPassword() {
             return SecurityIntegTestCase.this.nodeClientPassword();
         }
-
-        @Override
-        protected String transportClientUsername() {
-            return SecurityIntegTestCase.this.transportClientUsername();
-        }
-
-        @Override
-        protected SecureString transportClientPassword() {
-            return SecurityIntegTestCase.this.transportClientPassword();
-        }
     }

     protected static void assertGreenClusterState(Client client) {
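After this change, suites that need different credentials for the test client override only the
nodeClient* hooks that remain. A minimal sketch (subclass name and credentials hypothetical,
mirroring the PermissionPrecedenceTests override earlier in this patch):

    import org.elasticsearch.common.settings.SecureString;
    import org.elasticsearch.test.SecurityIntegTestCase;

    public class MySecurityIT extends SecurityIntegTestCase {     // hypothetical suite
        @Override
        protected String nodeClientUsername() {
            return "admin";                                       // hypothetical user
        }

        @Override
        protected SecureString nodeClientPassword() {
            return new SecureString("test123".toCharArray());     // hypothetical password
        }
    }
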
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
index 8e6e00f32a90e..768bc38813c0b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
@@ -118,8 +118,8 @@ protected SSLService getSslService() {
         };
         ThreadPool threadPool = mock(ThreadPool.class);
         ClusterService clusterService = mock(ClusterService.class);
-        settings = Security.additionalSettings(settings, true, false);
-        Set<Setting<?>> allowedSettings = new HashSet<>(Security.getSettings(false, null));
+        settings = Security.additionalSettings(settings, true);
+        Set<Setting<?>> allowedSettings = new HashSet<>(Security.getSettings(null));
         allowedSettings.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         ClusterSettings clusterSettings = new ClusterSettings(settings, allowedSettings);
         when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
@@ -180,7 +180,7 @@ public void testDisabledByDefault() throws Exception {
     }

     public void testHttpSettingDefaults() throws Exception {
-        final Settings defaultSettings = Security.additionalSettings(Settings.EMPTY, true, false);
+        final Settings defaultSettings = Security.additionalSettings(Settings.EMPTY, true);
         assertThat(SecurityField.NAME4, equalTo(NetworkModule.TRANSPORT_TYPE_SETTING.get(defaultSettings)));
         assertThat(SecurityField.NAME4, equalTo(NetworkModule.HTTP_TYPE_SETTING.get(defaultSettings)));
     }
@@ -189,7 +189,7 @@ public void testTransportSettingNetty4Both() {
         Settings both4 = Security.additionalSettings(Settings.builder()
             .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4)
             .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4)
-            .build(), true, false);
+            .build(), true);
         assertFalse(NetworkModule.TRANSPORT_TYPE_SETTING.exists(both4));
         assertFalse(NetworkModule.HTTP_TYPE_SETTING.exists(both4));
     }
@@ -198,13 +198,13 @@ public void testTransportSettingValidation() {
         final String badType = randomFrom("netty4", "other", "security1");
         Settings settingsTransport = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, badType).build();
         IllegalArgumentException badTransport = expectThrows(IllegalArgumentException.class,
-            () -> Security.additionalSettings(settingsTransport, true, false));
+            () -> Security.additionalSettings(settingsTransport, true));
         assertThat(badTransport.getMessage(), containsString(SecurityField.NAME4));
         assertThat(badTransport.getMessage(), containsString(NetworkModule.TRANSPORT_TYPE_KEY));

         Settings settingsHttp = Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, badType).build();
         IllegalArgumentException badHttp = expectThrows(IllegalArgumentException.class,
-            () -> Security.additionalSettings(settingsHttp, true, false));
+            () -> Security.additionalSettings(settingsHttp, true));
         assertThat(badHttp.getMessage(), containsString(SecurityField.NAME4));
         assertThat(badHttp.getMessage(), containsString(NetworkModule.HTTP_TYPE_KEY));
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java
index f6e5552ddbc53..b04b8c8ac3d36 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java
@@ -32,7 +32,7 @@
  * templates when started within security, as this requires certain
  * system privileges
  */
-@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0)
+@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
 public class TemplateUpgraderTests extends SecurityIntegTestCase {

     public void testTemplatesWorkAsExpected() throws Exception {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java
index 23408f5668ec9..866c52989af6f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java
@@ -29,7 +29,7 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

-@ClusterScope(scope = TEST, numDataNodes = 1)
+@ClusterScope(scope = TEST, numDataNodes = 1, transportClientRatio = 0.0)
 public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {

     private static Settings startupFilterSettings;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java
index f507edf97874f..3290aba27e37f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
-import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.search.SearchContextMissingException;
 import org.elasticsearch.test.SecurityIntegTestCase;
@@ -99,14 +98,4 @@ public void testSearchAndClearScroll() throws Exception {
     public void cleanupSecurityIndex() throws Exception {
         super.deleteSecurityIndex();
     }
-
-    @Override
-    public String transportClientUsername() {
-        return this.nodeClientUsername();
-    }
-
-    @Override
-    public SecureString transportClientPassword() {
-        return this.nodeClientPassword();
-    }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java
index bc17626b1f426..9f0b7863d30e7 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.security.transport.filter;

-import org.elasticsearch.client.Client;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -61,11 +60,6 @@ public void testThatIpFilteringIsIntegratedIntoNettyPipelineViaHttp() throws Exc
         }
     }

-    public void testThatIpFilteringIsNotAppliedForDefaultTransport() throws Exception {
-        Client client = internalCluster().transportClient();
-        assertGreenClusterState(client);
-    }
-
     public void testThatIpFilteringIsAppliedForProfile() throws Exception {
         try (Socket socket = new Socket()){
             trySocketConnection(socket, getProfileAddress("client"));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java
index 65a5fb080cdb0..96922aa8822e4 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java
@@ -21,7 +21,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.is;

-@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1)
+@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1, transportClientRatio = 0.0)
 public class IpFilteringUpdateTests extends SecurityIntegTestCase {

     private static int randomClientPort;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java
deleted file mode 100644
index fe1b65e851d0f..0000000000000
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License;
- * you may not use this file except in compliance with the Elastic License.
- */
-package org.elasticsearch.xpack.security.transport.netty4;
-
-import org.elasticsearch.client.Client;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.SecurityIntegTestCase;
-import org.elasticsearch.test.SecuritySettingsSource;
-import org.elasticsearch.transport.TransportSettings;
-import org.elasticsearch.xpack.core.ssl.SSLClientAuth;
-
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING;
-import static org.hamcrest.CoreMatchers.is;
-
-// TODO delete this test?
-public class IPHostnameVerificationTests extends SecurityIntegTestCase {
-    private Path certPath;
-    private Path keyPath;
-
-    @Override
-    protected boolean transportSSLEnabled() {
-        return true;
-    }
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        Settings settings = super.nodeSettings(nodeOrdinal);
-        Settings.Builder builder = Settings.builder()
-            .put(settings.filter((s) -> s.startsWith("xpack.security.transport.ssl.") == false), false);
-        settings = builder.build();
-
-        // The default Unicast test behavior is to use 'localhost' with the port number. For this test we need to use IP
-        List<String> newUnicastAddresses = new ArrayList<>();
-        for (String address : settings.getAsList(DISCOVERY_SEED_HOSTS_SETTING.getKey())) {
-            newUnicastAddresses.add(address.replace("localhost", "127.0.0.1"));
-        }
-
-        Settings.Builder settingsBuilder = Settings.builder()
-            .put(settings)
-            .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey(), newUnicastAddresses);
-
-        try {
-            //Use a cert with a CN of "Elasticsearch Test Node" and IPv4+IPv6 ip addresses as SubjectAlternativeNames
-            certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.crt");
-            keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.pem");
-            assertThat(Files.exists(certPath), is(true));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-
-        SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> {
-            secureSettings.setString("xpack.security.transport.ssl.secure_key_passphrase", "testnode-ip-only");
-        });
-        return settingsBuilder.put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath())
-            .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath())
-            .put("xpack.security.transport.ssl.certificate_authorities", certPath.toAbsolutePath())
-            .put(TransportSettings.BIND_HOST.getKey(), "127.0.0.1")
-            .put("network.host", "127.0.0.1")
-            .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.NONE)
-            .put("xpack.security.transport.ssl.verification_mode", "full")
-            .build();
-    }
-
-    @Override
-    protected Settings transportClientSettings() {
-        Settings clientSettings = super.transportClientSettings();
-        return Settings.builder().put(clientSettings.filter(k -> k.startsWith("xpack.security.transport.ssl.") == false))
-            .put("xpack.security.transport.ssl.verification_mode", "certificate")
-            .put("xpack.security.transport.ssl.key", keyPath.toAbsolutePath())
-            .put("xpack.security.transport.ssl.certificate", certPath.toAbsolutePath())
-            .put("xpack.security.transport.ssl.key_passphrase", "testnode-ip-only")
-            .put("xpack.security.transport.ssl.certificate_authorities", certPath)
-            .build();
-    }
-
-    public void testTransportClientConnectionWorksWithIPOnlyHostnameVerification() throws Exception {
-        Client client = internalCluster().transportClient();
-        assertGreenClusterState(client);
-    }
-}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java
index ce0cc5c111265..88d27d4171a19 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java
@@ -119,7 +119,7 @@ public void testThatHttpWorksWithSslClientAuth() throws IOException {
         try (RestClient restClient = createRestClient(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy), "https")) {
             Request request = new Request("GET", "/");
             RequestOptions.Builder options = request.getOptions().toBuilder();
-            options.addHeader("Authorization", basicAuthHeaderValue(transportClientUsername(), transportClientPassword()));
+            options.addHeader("Authorization", basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword()));
             request.setOptions(options);
             Response response = restClient.performRequest(request);
             assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java
index 944c3306763a6..9c540f559b688 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java
@@ -49,7 +49,7 @@
 *
 * @see RestrictedTrustManager
 */
-@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false)
+@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false, transportClientRatio = 0.0)
 @TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG")
 public class SSLTrustRestrictionsTests extends SecurityIntegTestCase {

@@ -149,15 +149,6 @@ private void writeRestrictions(String trustedPattern) {
         runResourceWatcher();
     }

-    @Override
-    protected Settings transportClientSettings() {
-        Settings parentSettings = super.transportClientSettings();
-        Settings.Builder builder = Settings.builder()
-            .put(parentSettings.filter((s) -> s.startsWith("xpack.security.transport.ssl.") == false))
-            .put(nodeSSL);
-        return builder.build();
-    }
-
     @Override
     protected boolean transportSSLEnabled() {
         return true;
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
index dd7f268e1a6f5..4226ff4fed0d9 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
@@ -226,16 +226,14 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa
     private BulkProcessor bulkProcessor;

     protected final Settings settings;
-    protected final boolean transportClient;
     protected final boolean enabled;
     protected List reloadableServices = new ArrayList<>();

     public Watcher(final Settings settings) {
         this.settings = settings;
-        this.transportClient = XPackPlugin.transportClientMode(settings);
         this.enabled = XPackSettings.WATCHER_ENABLED.get(settings);

-        if (enabled && transportClient == false) {
+        if (enabled) {
             validAutoCreateIndex(settings, logger);
         }
     }
@@ -433,7 +431,7 @@ public Collection createGuiceModules() {
         modules.add(b -> b.bind(Clock.class).toInstance(getClock())); //currently assuming the only place clock is bound
         modules.add(b -> {
             XPackPlugin.bindFeatureSet(b, WatcherFeatureSet.class);
-            if (transportClient || enabled == false) {
+            if (enabled == false) {
                 b.bind(WatcherService.class).toProvider(Providers.of(null));
             }
         });
@@ -567,7 +565,7 @@ public List getRestHandlers(Settings settings, RestController restC

     @Override
     public void onIndexModule(IndexModule module) {
-        if (enabled == false || transportClient) {
+        if (enabled == false) {
             return;
         }

@@ -676,7 +674,7 @@ public void close() throws IOException {
     */
     @Override
     public void reload(Settings settings) {
-        if (enabled == false || transportClient) {
+        if (enabled == false) {
             return;
         }
         reloadableServices.forEach(s -> s.reload(settings));
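The Watcher change above drops the transport-client guard but keeps the existing Guice idiom:
when the plugin is disabled, the service is bound to a null provider so injection points still
resolve. A minimal sketch of that idiom (MyService and the enabled flag are placeholders, not
code from this patch):

    import java.util.ArrayList;
    import java.util.Collection;
    import org.elasticsearch.common.inject.Module;
    import org.elasticsearch.common.inject.util.Providers;

    boolean enabled = false;                      // e.g. read from a feature-enabled setting
    Collection<Module> modules = new ArrayList<>();
    // With the transport-client check gone, only the enabled flag decides the binding.
    modules.add(b -> {
        if (enabled == false) {
            b.bind(MyService.class).toProvider(Providers.of(null));
        }
    });
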
diff --git a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java
index 0a75565fbc075..855162a0b86fc 100644
--- a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java
+++ b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java
@@ -5,91 +5,175 @@
  */
 package org.elasticsearch.xpack.security;

-import org.elasticsearch.index.reindex.BulkByScrollResponse;
-import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.index.reindex.DeleteByQueryAction;
-import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder;
-import org.elasticsearch.index.reindex.ReindexAction;
-import org.elasticsearch.index.reindex.ReindexRequestBuilder;
-import org.elasticsearch.index.reindex.UpdateByQueryAction;
-import org.elasticsearch.index.reindex.UpdateByQueryRequestBuilder;
-import org.elasticsearch.test.SecurityIntegTestCase;
-import org.elasticsearch.xpack.core.security.SecurityField;
-
-
+import org.elasticsearch.index.reindex.BulkByScrollResponse;
+import org.elasticsearch.index.reindex.DeleteByQueryRequest;
+import org.elasticsearch.index.reindex.ReindexRequest;
+import org.elasticsearch.index.reindex.UpdateByQueryRequest;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.util.Collections;
+
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.is;
+
+
+public class ReindexWithSecurityIT extends ESRestTestCase {
+
+    private static final String USER = "test_admin";
+    private static final String PASS = "x-pack-test-password";
+
+    private static Path httpTrustStore;
+
+    @BeforeClass
+    public static void findTrustStore() throws Exception {
+        final URL resource = ReindexWithSecurityClientYamlTestSuiteIT.class.getResource("/ssl/ca.p12");
+        if (resource == null) {
+            throw new FileNotFoundException("Cannot find classpath resource /ssl/ca.p12");
+        }
+        httpTrustStore = PathUtils.get(resource.toURI());
+    }

-public class ReindexWithSecurityIT extends SecurityIntegTestCase {
+    @AfterClass
+    public static void cleanupStatics() {
+        httpTrustStore = null;
+    }

     @Override
-    protected Settings externalClusterClientSettings() {
-        Settings.Builder builder = Settings.builder().put(super.externalClusterClientSettings());
-        builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4);
-        builder.put(SecurityField.USER_SETTING.getKey(), "test_admin:x-pack-test-password");
-        return builder.build();
+    protected String getProtocol() {
+        return "https";
     }

     /**
-     * TODO: this entire class should be removed. SecurityIntegTestCase is meant for tests, but we run against real xpack
+     * All tests run as an administrative user but use es-security-runas-user to become a less privileged user.
      */
     @Override
-    public void doAssertXPackIsInstalled() {
-        // this assertion doesn't make sense with a real distribution, since there is not currently a way
-        // from nodes info to see which modules are loaded
+    protected Settings restClientSettings() {
+        String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()));
+        return Settings.builder()
+            .put(ThreadContext.PREFIX + ".Authorization", token)
+            .put(TRUSTSTORE_PATH, httpTrustStore)
+            .put(TRUSTSTORE_PASSWORD, "password")
+            .build();
     }

-    public void testDeleteByQuery() {
+    public void testDeleteByQuery() throws IOException {
         createIndicesWithRandomAliases("test1", "test2", "test3");

-        BulkByScrollResponse response = new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE)
-            .source("test1", "test2")
-            .filter(QueryBuilders.matchAllQuery())
-            .get();
+        RestHighLevelClient restClient = new TestRestHighLevelClient();
+        BulkByScrollResponse response = restClient.deleteByQuery((DeleteByQueryRequest) new DeleteByQueryRequest()
+            .setQuery(QueryBuilders.matchAllQuery())
+            .indices("test1", "test2"), RequestOptions.DEFAULT);
         assertNotNull(response);

-        response = new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE)
-            .source("test*")
-            .filter(QueryBuilders.matchAllQuery())
-            .get();
+        response = restClient.deleteByQuery((DeleteByQueryRequest) new DeleteByQueryRequest()
+            .setQuery(QueryBuilders.matchAllQuery())
+            .indices("test*"), RequestOptions.DEFAULT);
         assertNotNull(response);

-        IndexNotFoundException e = expectThrows(IndexNotFoundException.class,
-            () -> new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE)
-                .source("test1", "index1")
-                .filter(QueryBuilders.matchAllQuery())
-                .get());
-        assertEquals("no such index [index1]", e.getMessage());
+        ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
+            () -> restClient.deleteByQuery((DeleteByQueryRequest) new DeleteByQueryRequest()
+                .setQuery(QueryBuilders.matchAllQuery())
+                .indices("test1", "index1"), RequestOptions.DEFAULT));
+        assertThat(e.getMessage(), containsString("no such index [index1]"));
     }

-    public void testUpdateByQuery() {
+    public void testUpdateByQuery() throws IOException {
         createIndicesWithRandomAliases("test1", "test2", "test3");

-        BulkByScrollResponse response = new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE)
-            .source("test1", "test2").get();
+        RestHighLevelClient restClient = new TestRestHighLevelClient();
+        BulkByScrollResponse response =
+            restClient.updateByQuery((UpdateByQueryRequest) new UpdateByQueryRequest().indices("test1", "test2"), RequestOptions.DEFAULT);
         assertNotNull(response);

-        response = new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE).source("test*").get();
+        response = restClient.updateByQuery((UpdateByQueryRequest) new UpdateByQueryRequest().indices("test*"), RequestOptions.DEFAULT);
         assertNotNull(response);

-        IndexNotFoundException e = expectThrows(IndexNotFoundException.class,
-            () -> new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE).source("test1", "index1").get());
-        assertEquals("no such index [index1]", e.getMessage());
+        ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
+            () -> restClient.updateByQuery((UpdateByQueryRequest) new UpdateByQueryRequest().indices("test1", "index1"),
+                RequestOptions.DEFAULT));
+        assertThat(e.getMessage(), containsString("no such index [index1]"));
     }

-    public void testReindex() {
+    public void testReindex() throws IOException {
         createIndicesWithRandomAliases("test1", "test2", "test3", "dest");

-        BulkByScrollResponse response = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test1", "test2")
-            .destination("dest").get();
+        RestHighLevelClient restClient = new TestRestHighLevelClient();
+        BulkByScrollResponse response = restClient.reindex(new ReindexRequest().setSourceIndices("test1", "test2").setDestIndex("dest"),
+            RequestOptions.DEFAULT);
         assertNotNull(response);

-        response = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test*").destination("dest").get();
+        response = restClient.reindex(new ReindexRequest().setSourceIndices("test*").setDestIndex("dest"),
+            RequestOptions.DEFAULT);
         assertNotNull(response);

-        IndexNotFoundException e = expectThrows(IndexNotFoundException.class,
-            () -> new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("test1", "index1").destination("dest").get());
-        assertEquals("no such index [index1]", e.getMessage());
+        ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
+            () -> restClient.reindex(new ReindexRequest().setSourceIndices("test1", "index1").setDestIndex("dest"),
+                RequestOptions.DEFAULT));
+        assertThat(e.getMessage(), containsString("no such index [index1]"));
+    }
+
+    /**
+     * Creates the indices provided as argument, randomly associating them with aliases, indexes one dummy document per index
+     * and refreshes the new indices
+     */
+    private void createIndicesWithRandomAliases(String... indices) throws IOException {
+        for (String index : indices) {
+            createIndex(index, Settings.EMPTY);
+        }
+
+        RestHighLevelClient restClient = new TestRestHighLevelClient();
+        if (frequently()) {
+            boolean aliasAdded = false;
+
+            IndicesAliasesRequest request = new IndicesAliasesRequest();
+            for (String index : indices) {
+                if (frequently()) {
+                    //one alias per index with prefix "alias-"
+                    request.addAliasAction(AliasActions.add().index(index).alias("alias-" + index));
+                    aliasAdded = true;
+                }
+            }
+            // If we get to this point and we haven't added an alias to the request we need to add one
+            // or the request will fail, so force-add an alias in that case
+            if (aliasAdded == false || randomBoolean()) {
+                //one alias pointing to all indices
+                for (String index : indices) {
+                    request.addAliasAction(AliasActions.add().index(index).alias("alias"));
+                }
+            }
+            AcknowledgedResponse response = restClient.indices().updateAliases(request, RequestOptions.DEFAULT);
+            assertThat(response.isAcknowledged(), is(true));
+        }
+
+        for (String index : indices) {
+            restClient.index(new IndexRequest(index).source("field", "value"), RequestOptions.DEFAULT);
+        }
+        restClient.indices().refresh(new RefreshRequest(indices), RequestOptions.DEFAULT);
+    }
+
+    private class TestRestHighLevelClient extends RestHighLevelClient {
+        TestRestHighLevelClient() {
+            super(client(), restClient -> {}, Collections.emptyList());
+        }
+    }
 }
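The class comment above mentions es-security-runas-user. For context, a rough sketch of how a
REST test can impersonate a less-privileged user on a single request (the target user "joe" is
hypothetical and would need a run_as grant on the authenticated user):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.Response;

    Request request = new Request("GET", "/_security/_authenticate");
    RequestOptions.Builder options = request.getOptions().toBuilder();
    options.addHeader("es-security-runas-user", "joe");   // hypothetical target user
    request.setOptions(options);
    // Authenticates as the admin user from restClientSettings(), then runs as "joe".
    Response response = client().performRequest(request);
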
diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle
deleted file mode 100644
index 556e36e51467f..0000000000000
--- a/x-pack/qa/security-client-tests/build.gradle
+++ /dev/null
@@ -1,40 +0,0 @@
-apply plugin: 'elasticsearch.standalone-rest-test'
-apply plugin: 'elasticsearch.rest-test'
-
-dependencies {
-    testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
-    testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
-}
-
-String outputDir = "${buildDir}/generated-resources/${project.name}"
-task copyXPackPluginProps(type: Copy) {
-    from project(xpackModule('core')).file('src/main/plugin-metadata')
-    from project(xpackModule('core')).tasks.pluginProperties
-    into outputDir
-}
-project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
-
-integTestRunner {
-    systemProperty 'tests.security.manager', 'false'
-}
-
-integTestCluster {
-    setting 'xpack.ilm.enabled', 'false'
-    setting 'xpack.security.enabled', 'true'
-    setting 'xpack.ml.enabled', 'false'
-    setting 'xpack.license.self_generated.type', 'trial'
-    setupCommand 'setupDummyUser',
-        'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
-    setupCommand 'setupTransportClientUser',
-        'bin/elasticsearch-users', 'useradd', 'transport', '-p', 'x-pack-test-password', '-r', 'transport_client'
-    waitCondition = { node, ant ->
-        File tmpFile = new File(node.cwd, 'wait.success')
-        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow",
-            dest: tmpFile.toString(),
-            username: 'test_user',
-            password: 'x-pack-test-password',
-            ignoreerrors: true,
-            retries: 10)
-        return tmpFile.exists()
-    }
-}
diff --git a/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java b/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java
deleted file mode 100644
index 519f365d515a0..0000000000000
--- a/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License;
- * you may not use this file except in compliance with the Elastic License.
- */
-package org.elasticsearch.xpack.security.qa;
-
-import org.elasticsearch.ElasticsearchSecurityException;
-import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
-import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
-import org.elasticsearch.client.transport.TransportClient;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.SecureString;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.xpack.core.XPackClientPlugin;
-import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient;
-import org.elasticsearch.xpack.core.security.SecurityField;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.is;
-
-/**
- * Integration tests that test a transport client with security being loaded that connect to an external cluster
- */
-public class SecurityTransportClientIT extends ESIntegTestCase {
-    static final String ADMIN_USER_PW = "test_user:x-pack-test-password";
-    static final String TRANSPORT_USER_PW = "transport:x-pack-test-password";
-
-    @Override
-    protected Settings externalClusterClientSettings() {
-        return Settings.builder()
-            .put(SecurityField.USER_SETTING.getKey(), ADMIN_USER_PW)
-            .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4")
-            .build();
-    }
-
-    @Override
-    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
-        return Collections.singletonList(XPackClientPlugin.class);
-    }
-
-    public void testThatTransportClientWithoutAuthenticationDoesNotWork() throws Exception {
-        try (TransportClient client = transportClient(Settings.EMPTY)) {
-            boolean connected = awaitBusy(() -> {
-                return client.connectedNodes().size() > 0;
-            }, 5L, TimeUnit.SECONDS);
-
-            assertThat(connected, is(false));
-        }
-    }
-
-    public void testThatTransportClientAuthenticationWithTransportClientRole() throws Exception {
-        Settings settings = Settings.builder()
-            .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_USER_PW)
-            .build();
-        try (TransportClient client = transportClient(settings)) {
-            boolean connected = awaitBusy(() -> {
-                return client.connectedNodes().size() > 0;
-            }, 5L, TimeUnit.SECONDS);
-
-            assertThat(connected, is(true));
-
-            // this checks that the transport client is really running in a limited state
-            try {
-                client.admin().cluster().prepareHealth().get();
-                fail("the transport user should not be be able to get health!");
-            } catch (ElasticsearchSecurityException e) {
-                assertThat(e.toString(), containsString("unauthorized"));
-            }
-        }
-    }
-
-    public void testTransportClientWithAdminUser() throws Exception {
-        final boolean useTransportUser = randomBoolean();
-        Settings settings = Settings.builder()
-            .put(SecurityField.USER_SETTING.getKey(), useTransportUser ? TRANSPORT_USER_PW : ADMIN_USER_PW)
-            .build();
-        try (TransportClient client = transportClient(settings)) {
-            boolean connected = awaitBusy(() -> {
-                return client.connectedNodes().size() > 0;
-            }, 5L, TimeUnit.SECONDS);
-
-            assertThat(connected, is(true));
-
-            // this checks that the transport client is really running in a limited state
-            ClusterHealthResponse response;
-            if (useTransportUser) {
-                response = client.filterWithHeader(Collections.singletonMap("Authorization",
-                    basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray()))))
-                    .admin().cluster().prepareHealth().get();
-            } else {
-                response = client.admin().cluster().prepareHealth().get();
-            }
-
-            assertThat(response.isTimedOut(), is(false));
-        }
-    }
-
-    TransportClient transportClient(Settings extraSettings) {
-        NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
-        List<NodeInfo> nodes = nodeInfos.getNodes();
-        assertTrue(nodes.isEmpty() == false);
-        TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress();
-        String clusterName = nodeInfos.getClusterName().value();
-
-        Settings settings = Settings.builder()
-            .put(extraSettings)
-            .put("cluster.name", clusterName)
-            .build();
-
-        TransportClient client = new PreBuiltXPackTransportClient(settings);
-        client.addTransportAddress(publishAddress);
-        return client;
-    }
-}
diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle
index 1ff65519c367d..4790df3609c35 100644
--- a/x-pack/qa/security-example-spi-extension/build.gradle
+++ b/x-pack/qa/security-example-spi-extension/build.gradle
@@ -9,7 +9,7 @@ esplugin {

 dependencies {
     compileOnly "org.elasticsearch.plugin:x-pack-core:${version}"
-    testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
+    testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}"
 }

diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java
index 4487187a80b6d..e75c7705ef9bf 100644
--- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java
+++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java
@@ -5,60 +5,43 @@
  */
 package org.elasticsearch.example.realm;

-import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
-import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.apache.http.util.EntityUtils;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
-import org.elasticsearch.client.transport.NoNodeAvailableException;
-import org.elasticsearch.client.transport.TransportClient;
-import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient;
-import org.elasticsearch.xpack.core.XPackClientPlugin;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
+import org.elasticsearch.test.rest.ESRestTestCase;

+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;

 /**
  * Integration test to test authentication with the custom realm
  */
-public class CustomRealmIT extends ESIntegTestCase {
+public class CustomRealmIT extends ESRestTestCase {

     @Override
-    protected Settings externalClusterClientSettings() {
+    protected Settings restClientSettings() {
         return Settings.builder()
             .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
             .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString())
-            .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4")
             .build();
     }

-    @Override
-    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
-        return Collections.<Class<? extends Plugin>>singleton(XPackClientPlugin.class);
-    }
-
-    public void testHttpConnectionWithNoAuthentication() throws Exception {
-        try {
-            getRestClient().performRequest(new Request("GET", "/"));
-            fail("request should have failed");
-        } catch(ResponseException e) {
-            Response response = e.getResponse();
-            assertThat(response.getStatusLine().getStatusCode(), is(401));
-            String value = response.getHeader("WWW-Authenticate");
-            assertThat(value, is("custom-challenge"));
-        }
+    public void testHttpConnectionWithNoAuthentication() {
+        Request request = new Request("GET", "/");
+        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
+        builder.addHeader(CustomRealm.USER_HEADER, "");
+        builder.addHeader(CustomRealm.PW_HEADER, "");
+        request.setOptions(builder);
+        ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request));
+        Response response = e.getResponse();
+        assertThat(response.getStatusLine().getStatusCode(), is(401));
+        String value = response.getHeader("WWW-Authenticate");
+        assertThat(value, is("custom-challenge"));
     }

     public void testHttpAuthentication() throws Exception {
@@ -67,59 +50,16 @@ public void testHttpAuthentication() throws Exception {
         options.addHeader(CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER);
         options.addHeader(CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString());
         request.setOptions(options);
-        Response response = getRestClient().performRequest(request);
+        Response response = client().performRequest(request);
         assertThat(response.getStatusLine().getStatusCode(), is(200));
     }

-    public void testTransportClient() throws Exception {
-        NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
-        List<NodeInfo> nodes = nodeInfos.getNodes();
-        assertTrue(nodes.isEmpty() == false);
-        TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress();
-        String clusterName = nodeInfos.getClusterName().value();
-
-        Settings settings = Settings.builder()
-            .put("cluster.name", clusterName)
-            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
-            .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
-            .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString())
-            .build();
-        try (TransportClient client = new PreBuiltXPackTransportClient(settings)) {
-            client.addTransportAddress(publishAddress);
-            ClusterHealthResponse response = client.admin().cluster().prepareHealth().execute().actionGet();
-            assertThat(response.isTimedOut(), is(false));
-        }
-    }
-
-    public void testTransportClientWrongAuthentication() throws Exception {
-        NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
-        List<NodeInfo> nodes = nodeInfos.getNodes();
-        assertTrue(nodes.isEmpty() == false);
-        TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress();
-        String clusterName = nodeInfos.getClusterName().value();
-
-        Settings settings = Settings.builder()
-            .put("cluster.name", clusterName)
-            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
-            .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER + randomAlphaOfLength(1))
-            .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString())
-            .build();
-        try (TransportClient client = new PreBuiltXPackTransportClient(settings)) {
-            client.addTransportAddress(publishAddress);
-            client.admin().cluster().prepareHealth().execute().actionGet();
-            fail("authentication failure should have resulted in a NoNodesAvailableException");
-        } catch (NoNodeAvailableException e) {
-            // expected
-        }
-    }
-
     public void testSettingsFiltering() throws Exception {
-        NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().clear().setSettings(true).get();
-        for(NodeInfo info : nodeInfos.getNodes()) {
-            Settings settings = info.getSettings();
-            assertNotNull(settings);
-            assertNull(settings.get("xpack.security.authc.realms.custom.custom.filtered_setting"));
-            assertEquals("0", settings.get("xpack.security.authc.realms.custom.custom.order"));
-        }
+        Request request = new Request("GET", "/_nodes/_all/settings");
+        request.addParameter("flat_settings", "true");
+        Response response = client().performRequest(request);
+        String responseString = EntityUtils.toString(response.getEntity());
+        assertThat(responseString, not(containsString("xpack.security.authc.realms.custom.custom.filtered_setting")));
+        assertThat(responseString, containsString("xpack.security.authc.realms.custom.custom.order"));
     }
 }
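The restClientSettings() override above works because the REST test infrastructure turns settings
under ThreadContext.PREFIX ("request.headers") into default headers on the test REST client.
Roughly, and only as a paraphrase of that mechanism (not the framework's exact code):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.http.Header;
    import org.apache.http.message.BasicHeader;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.concurrent.ThreadContext;

    // Every key under "request.headers." becomes a default header on the client.
    Settings headers = restClientSettings().getByPrefix(ThreadContext.PREFIX + ".");
    List<Header> defaultHeaders = new ArrayList<>();
    for (String name : headers.keySet()) {
        defaultHeaders.add(new BasicHeader(name, headers.get(name)));
    }
    // ... then handed to RestClientBuilder#setDefaultHeaders when the test client is built.
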
diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java
index 57a895848e3a8..3aab2a36562de 100644
--- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java
+++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java
@@ -9,20 +9,20 @@
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
-import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.security.PutUserRequest;
+import org.elasticsearch.client.security.RefreshPolicy;
+import org.elasticsearch.client.security.user.User;
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.example.realm.CustomRealm;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.xpack.core.XPackClientPlugin;
-import org.elasticsearch.xpack.core.security.authc.support.Hasher;
+import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
-import org.elasticsearch.xpack.core.security.client.SecurityClient;

-import java.util.Collection;
+import java.io.IOException;
 import java.util.Collections;
+import java.util.List;

 import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.INDEX;
 import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.ROLE_A;
@@ -33,7 +33,7 @@
 /**
  * Integration test for custom roles providers.
  */
-public class CustomRolesProviderIT extends ESIntegTestCase {
+public class CustomRolesProviderIT extends ESRestTestCase {

     private static final String TEST_USER = "test_user";
     private static final String TEST_PWD = "change_me";
@@ -46,22 +46,17 @@ public class CustomRolesProviderIT extends ESIntegTestCase {
     }

     @Override
-    protected Settings externalClusterClientSettings() {
+    protected Settings restClientSettings() {
         return Settings.builder()
-            .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
-            .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString())
-            .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4")
-            .build();
+               .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
+               .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString())
+               .build();
     }

-    @Override
-    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
-        return Collections.singleton(XPackClientPlugin.class);
-    }
-
-    public void setupTestUser(String role) {
-        SecurityClient securityClient = new SecurityClient(client());
-        securityClient.preparePutUser(TEST_USER, TEST_PWD.toCharArray(), Hasher.BCRYPT, role).get();
+    public void setupTestUser(String role) throws IOException {
+        new TestRestHighLevelClient().security().putUser(
+            PutUserRequest.withPassword(new User(TEST_USER, List.of(role)), TEST_PWD.toCharArray(), true, RefreshPolicy.IMMEDIATE),
+            RequestOptions.DEFAULT);
     }

     public void testAuthorizedCustomRoleSucceeds() throws Exception {
@@ -69,7 +64,7 @@ public void testAuthorizedCustomRoleSucceeds() throws Exception {
         // roleB has all permissions on index "foo", so creating "foo" should succeed
         Request request = new Request("PUT", "/" + INDEX);
         request.setOptions(AUTH_OPTIONS);
-        Response response = getRestClient().performRequest(request);
+        Response response = client().performRequest(request);
         assertThat(response.getStatusLine().getStatusCode(), is(200));
     }

@@ -79,27 +74,23 @@ public void testFirstResolvedRoleTakesPrecedence() throws Exception {
         // the first custom role provider appears first in order, it should take precedence and deny
         // permission to create the index
         setupTestUser(ROLE_A);
-        // roleB has all permissions on index "foo", so creating "foo" should succeed
-        try {
-            Request request = new Request("PUT", "/" + INDEX);
-            request.setOptions(AUTH_OPTIONS);
-            getRestClient().performRequest(request);
-            fail(ROLE_A + " should not be authorized to create index " + INDEX);
-        } catch (ResponseException e) {
-            assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403));
-        }
+        Request request = new Request("PUT", "/" + INDEX);
+        request.setOptions(AUTH_OPTIONS);
+        ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request));
+        assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403));
     }

     public void testUnresolvedRoleDoesntSucceed() throws Exception {
         setupTestUser("unknown");
-        // roleB has all permissions on index "foo", so creating "foo" should succeed
-        try {
-            Request request = new Request("PUT", "/" + INDEX);
-            request.setOptions(AUTH_OPTIONS);
-            getRestClient().performRequest(request);
-            fail(ROLE_A + " should not be authorized to create index " + INDEX);
-        } catch (ResponseException e) {
-            assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403));
+        Request request = new Request("PUT", "/" + INDEX);
+        request.setOptions(AUTH_OPTIONS);
+        ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request));
+        assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403));
+    }
+
+    private class TestRestHighLevelClient extends RestHighLevelClient {
+        TestRestHighLevelClient() {
+            super(client(), restClient -> {}, Collections.emptyList());
         }
     }
 }
diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java
index 2c2cdd044aab7..6ac2cdd3fb654 100644
--- a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java
+++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java
@@ -5,9 +5,9 @@
  */
 package org.elasticsearch.smoketest;

-import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.rest.ESRestTestCase;

-public class PreventFailingBuildIT extends ESIntegTestCase {
+public class PreventFailingBuildIT extends ESRestTestCase {

     public void testSoThatTestsDoNotFail() {
         // Noop
diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
index e88eac3028f3d..d4fe2129363c5 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle
+++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
@@ -7,6 +7,7 @@ apply plugin: 'elasticsearch.rest-test'

 dependencies {
     testCompile "org.elasticsearch.plugin:x-pack-core:${version}"
+    testCompile project(':client:rest-high-level')
 }

 String outputDir = "${buildDir}/generated-resources/${project.name}"
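The enableExporter()/disableExporter() methods in the next diff flip transient cluster settings
through the high-level client; for reference, a sketch of the equivalent low-level request
(setting names and values as used in that test):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;

    Request request = new Request("PUT", "/_cluster/settings");
    request.setJsonEntity("{\"transient\": {"
        + "\"xpack.monitoring.collection.enabled\": true,"
        + "\"xpack.monitoring.exporters._http.enabled\": true}}");
    Response response = client().performRequest(request);   // expects {"acknowledged":true,...}
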
*/ -public class SmokeTestMonitoringWithSecurityIT extends ESIntegTestCase { +public class SmokeTestMonitoringWithSecurityIT extends ESRestTestCase { + + public class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), RestClient::close, Collections.emptyList()); + } + } /** * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, @@ -78,19 +104,45 @@ protected void after() { }; private static final String USER = "test_user"; - private static final String PASS = "x-pack-test-password"; + private static final SecureString PASS = new SecureString("x-pack-test-password".toCharArray()); + private static final String KEYSTORE_PASS = "testnode"; private static final String MONITORING_PATTERN = ".monitoring-*"; + static Path keyStore; + + @BeforeClass + public static void getKeyStore() { + try { + keyStore = PathUtils.get(SmokeTestMonitoringWithSecurityIT.class.getResource("/testnode.jks").toURI()); + } catch (URISyntaxException e) { + throw new ElasticsearchException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + } + + @AfterClass + public static void clearKeyStore() { + keyStore = null; + } + + RestHighLevelClient newHighLevelClient() { + return new TestRestHighLevelClient(); + } + @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(XPackPlugin.class); + protected String getProtocol() { + return "https"; } @Override - protected Settings externalClusterClientSettings() { + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, PASS); return Settings.builder() - .put(SecurityField.USER_SETTING.getKey(), USER + ":" + PASS) - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4).build(); + .put(ThreadContext.PREFIX + ".Authorization", token) + .put(ESRestTestCase.TRUSTSTORE_PATH, keyStore) + .put(ESRestTestCase.TRUSTSTORE_PASSWORD, KEYSTORE_PASS).build(); } @Before @@ -100,73 +152,92 @@ public void enableExporter() throws Exception { .put("xpack.monitoring.exporters._http.enabled", true) .put("xpack.monitoring.exporters._http.host", "https://" + randomNodeHttpAddress()) .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings)); + ClusterUpdateSettingsResponse response = newHighLevelClient().cluster().putSettings( + new ClusterUpdateSettingsRequest().transientSettings(exporterSettings), RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); } @After - public void disableExporter() { + public void disableExporter() throws IOException { Settings exporterSettings = Settings.builder() .putNull("xpack.monitoring.collection.enabled") .putNull("xpack.monitoring.exporters._http.enabled") .putNull("xpack.monitoring.exporters._http.host") .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings)); + ClusterUpdateSettingsResponse response = newHighLevelClient().cluster().putSettings( + new ClusterUpdateSettingsRequest().transientSettings(exporterSettings), RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); } private boolean getMonitoringUsageExportersDefined() throws Exception { - final XPackUsageResponse usageResponse = new XPackUsageRequestBuilder(client()).execute().get(); - final Optional monitoringUsage = - usageResponse.getUsages() - .stream() - .filter(usage -> usage instanceof 
MonitoringFeatureSetUsage) - .map(usage -> (MonitoringFeatureSetUsage)usage) - .findFirst(); - - assertThat("Monitoring feature set does not exist", monitoringUsage.isPresent(), is(true)); - - return monitoringUsage.get().getExporters().isEmpty() == false; + RestHighLevelClient client = newHighLevelClient(); + final XPackUsageResponse usageResponse = client.xpack().usage(new XPackUsageRequest(), RequestOptions.DEFAULT); + Map<String, Object> monitoringUsage = usageResponse.getUsages().get("monitoring"); + assertThat("Monitoring feature set does not exist", monitoringUsage, notNullValue()); + + @SuppressWarnings("unchecked") + Map<String, Object> exporters = (Map<String, Object>) monitoringUsage.get("enabled_exporters"); + return exporters != null && exporters.isEmpty() == false; } public void testHTTPExporterWithSSL() throws Exception { // Ensures that the exporter is actually on assertBusy(() -> assertThat("[_http] exporter is not defined", getMonitoringUsageExportersDefined(), is(true))); + RestHighLevelClient client = newHighLevelClient(); // Checks that the monitoring index templates have been installed + GetIndexTemplatesRequest templateRequest = new GetIndexTemplatesRequest(MONITORING_PATTERN); assertBusy(() -> { - GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates(MONITORING_PATTERN).get(); - assertThat(response.getIndexTemplates().size(), greaterThanOrEqualTo(2)); + try { + GetIndexTemplatesResponse response = client.indices().getIndexTemplate(templateRequest, RequestOptions.DEFAULT); + assertThat(response.getIndexTemplates().size(), greaterThanOrEqualTo(2)); + } catch (Exception e) { + fail("template not ready yet: " + e.getMessage()); + } }); + GetIndexRequest indexRequest = new GetIndexRequest(MONITORING_PATTERN); // Waits for monitoring indices to be created assertBusy(() -> { try { - assertThat(client().admin().indices().prepareExists(MONITORING_PATTERN).get().isExists(), equalTo(true)); + assertThat(client.indices().exists(indexRequest, RequestOptions.DEFAULT), equalTo(true)); } catch (Exception e) { - fail("exception when checking for monitoring documents: " + e.getMessage()); + fail("monitoring index not created yet: " + e.getMessage()); } }); // Waits for indices to be ready - ensureYellowAndNoInitializingShards(MONITORING_PATTERN); + ClusterHealthRequest healthRequest = new ClusterHealthRequest(MONITORING_PATTERN); + healthRequest.waitForStatus(ClusterHealthStatus.YELLOW); + healthRequest.waitForEvents(Priority.LANGUID); + healthRequest.waitForNoRelocatingShards(true); + healthRequest.waitForNoInitializingShards(true); + ClusterHealthResponse response = client.cluster().health(healthRequest, RequestOptions.DEFAULT); + assertThat(response.isTimedOut(), is(false)); // Checks that the HTTP exporter has successfully exported some data + SearchRequest searchRequest = new SearchRequest(new String[] { MONITORING_PATTERN }, new SearchSourceBuilder().size(0)); assertBusy(() -> { try { - assertThat(client().prepareSearch(MONITORING_PATTERN).setSize(0).get().getHits().getTotalHits().value, greaterThan(0L)); + assertThat(client.search(searchRequest, RequestOptions.DEFAULT).getHits().getTotalHits().value, greaterThan(0L)); } catch (Exception e) { - fail("monitoring data not exported yet: " + e.getMessage()); + } }); } - private String randomNodeHttpAddress() { - List<NodeInfo> nodes = client().admin().cluster().prepareNodesInfo().clear().setHttp(true).get().getNodes(); - assertThat(nodes.size(), greaterThan(0)); - -
InetSocketAddress[] httpAddresses = new InetSocketAddress[nodes.size()]; - for (int i = 0; i < nodes.size(); i++) { - httpAddresses[i] = nodes.get(i).getHttp().address().publishAddress().address(); + private String randomNodeHttpAddress() throws IOException { + Response response = client().performRequest(new Request("GET", "/_nodes")); + assertOK(response); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map nodesAsMap = objectPath.evaluate("nodes"); + List httpAddresses = new ArrayList<>(); + for (Map.Entry entry : nodesAsMap.entrySet()) { + Map nodeDetails = (Map) entry.getValue(); + Map httpInfo = (Map) nodeDetails.get("http"); + httpAddresses.add((String) httpInfo.get("publish_address")); } - return NetworkAddress.format(randomFrom(httpAddresses)); + assertThat(httpAddresses.size(), greaterThan(0)); + return randomFrom(httpAddresses); } } diff --git a/x-pack/qa/transport-client-tests/build.gradle b/x-pack/qa/transport-client-tests/build.gradle deleted file mode 100644 index 5ca96eb0d7a87..0000000000000 --- a/x-pack/qa/transport-client-tests/build.gradle +++ /dev/null @@ -1,22 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') -} - -integTestCluster { - setting 'xpack.security.enabled', 'false' - setting 'xpack.license.self_generated.type', 'trial' -} - - -testingConventions { - naming.clear() - naming { - IT { - baseClass 'org.elasticsearch.xpack.ml.client.ESXPackSmokeClientTestCase' - } - } -} \ No newline at end of file diff --git a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java deleted file mode 100644 index 28267614dd36d..0000000000000 --- a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.client; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.URL; -import java.nio.file.Path; -import java.util.Locale; -import java.util.concurrent.atomic.AtomicInteger; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static org.hamcrest.Matchers.notNullValue; - -/** - * An abstract base class to run integration tests against an Elasticsearch - * cluster running outside of the test process. - *
- * You can define a list of transport addresses from where you can reach your - * cluster by setting "tests.cluster" system property. It defaults to - * "localhost:9300". If you run this from `gradle integTest` then it will start - * the clsuter for you and set up the property. - *
- * If you want to debug this module from your IDE, then start an external - * cluster by yourself, maybe with `gradle run`, then run JUnit. If you changed - * the default port, set "-Dtests.cluster=localhost:PORT" when running your - * test. - */ -@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -public abstract class ESXPackSmokeClientTestCase extends LuceneTestCase { - - /** - * Key used to eventually switch to using an external cluster and provide - * its transport addresses - */ - public static final String TESTS_CLUSTER = "tests.cluster"; - - protected static final Logger logger = LogManager.getLogger(ESXPackSmokeClientTestCase.class); - - private static final AtomicInteger counter = new AtomicInteger(); - private static Client client; - private static String clusterAddresses; - protected String index; - - private static Client startClient(Path tempDir, TransportAddress... transportAddresses) { - Settings.Builder builder = Settings.builder() - .put("node.name", "qa_xpack_smoke_client_" + counter.getAndIncrement()) - .put("client.transport.ignore_cluster_name", true) - .put("xpack.security.enabled", false) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir); - TransportClient client = new PreBuiltXPackTransportClient(builder.build()) - .addTransportAddresses(transportAddresses); - - logger.info("--> Elasticsearch Java TransportClient started"); - - Exception clientException = null; - try { - ClusterHealthResponse health = client.admin().cluster().prepareHealth().get(); - logger.info("--> connected to [{}] cluster which is running [{}] node(s).", - health.getClusterName(), health.getNumberOfNodes()); - } catch (Exception e) { - logger.error("Error getting cluster health", e); - clientException = e; - } - - assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, - clientException); - - return client; - } - - private static Client startClient() throws IOException { - String[] stringAddresses = clusterAddresses.split(","); - TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; - int i = 0; - for (String stringAddress : stringAddresses) { - URL url = new URL("http://" + stringAddress); - InetAddress inetAddress = InetAddress.getByName(url.getHost()); - transportAddresses[i++] = new TransportAddress( - new InetSocketAddress(inetAddress, url.getPort())); - } - return startClient(createTempDir(), transportAddresses); - } - - public static Client getClient() { - if (client == null) { - try { - client = startClient(); - } catch (IOException e) { - logger.error("can not start the client", e); - } - assertThat(client, notNullValue()); - } - return client; - } - - @BeforeClass - public static void initializeSettings() { - clusterAddresses = System.getProperty(TESTS_CLUSTER); - if (clusterAddresses == null || clusterAddresses.isEmpty()) { - fail("Must specify " + TESTS_CLUSTER + " for smoke client test"); - } - } - - @AfterClass - public static void stopTransportClient() { - if (client != null) { - client.close(); - client = null; - } - } - - @Before - public void defineIndexName() { - doClean(); - index = "qa-xpack-smoke-test-client-" - + randomAsciiOfLength(10).toLowerCase(Locale.getDefault()); - } - - @After - public void cleanIndex() { - doClean(); - } - - private void doClean() { - if (client != null) { - try { - client.admin().indices().prepareDelete(index).get(); - } catch (Exception e) { - // We ignore this cleanup exception - } - } - } -} diff --git 
a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java deleted file mode 100644 index 1a4959c0be84a..0000000000000 --- a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.client; - -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.ml.action.CloseJobAction; -import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; -import org.elasticsearch.xpack.core.ml.action.FlushJobAction; -import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; -import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.ml.action.GetJobsAction; -import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.action.PostDataAction; -import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.PutJobAction; -import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; -import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; -import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; -import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; -import org.elasticsearch.xpack.core.ml.job.config.DataDescription; -import org.elasticsearch.xpack.core.ml.job.config.Detector; -import org.elasticsearch.xpack.core.ml.job.config.Job; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; - -public class MLTransportClientIT extends ESXPackSmokeClientTestCase { - - public void testMLTransportClient_JobActions() { - Client client = getClient(); - XPackClient xPackClient = new XPackClient(client); - MachineLearningClient mlClient = xPackClient.machineLearning(); - - String jobId = "ml-transport-client-it-job"; - Job.Builder job = createJob(jobId); - - PutJobAction.Response putJobResponse = mlClient.putJob(new PutJobAction.Request(job)).actionGet(); - assertThat(putJobResponse, notNullValue()); - - GetJobsAction.Response getJobResponse = mlClient.getJobs(new GetJobsAction.Request(jobId)).actionGet(); - assertThat(getJobResponse, notNullValue()); - assertThat(getJobResponse.getResponse(), notNullValue()); - assertThat(getJobResponse.getResponse().count(), equalTo(1L)); - - // Open job POST data, 
flush, close and check a result - AcknowledgedResponse openJobResponse = mlClient.openJob(new OpenJobAction.Request(jobId)).actionGet(); - assertThat(openJobResponse.isAcknowledged(), equalTo(true)); - - String content = "{\"time\":1000, \"msg\": \"some categorical message\"}\n" + - "{\"time\":11000, \"msg\": \"some categorical message in the second bucket\"}\n" + - "{\"time\":21000, \"msg\": \"some categorical message in the third bucket\"}\n"; - PostDataAction.Request postRequest = new PostDataAction.Request(jobId); - postRequest.setContent(new BytesArray(content), XContentType.JSON); - PostDataAction.Response postResponse = mlClient.postData(postRequest).actionGet(); - assertThat(postResponse.getDataCounts(), notNullValue()); - assertThat(postResponse.getDataCounts().getInputFieldCount(), equalTo(3L)); - - FlushJobAction.Response flushResponse = mlClient.flushJob(new FlushJobAction.Request(jobId)).actionGet(); - assertThat(flushResponse.isFlushed(), equalTo(true)); - - CloseJobAction.Response closeResponse = mlClient.closeJob(new CloseJobAction.Request(jobId)).actionGet(); - assertThat(closeResponse.isClosed(), equalTo(true)); - - GetBucketsAction.Response getBucketsResponse = mlClient.getBuckets(new GetBucketsAction.Request(jobId)).actionGet(); - assertThat(getBucketsResponse.getBuckets().count(), equalTo(1L)); - - // Update a model snapshot - GetModelSnapshotsAction.Response getModelSnapshotResponse = - mlClient.getModelSnapshots(new GetModelSnapshotsAction.Request(jobId, null)).actionGet(); - assertThat(getModelSnapshotResponse.getPage().count(), equalTo(1L)); - String snapshotId = getModelSnapshotResponse.getPage().results().get(0).getSnapshotId(); - - UpdateModelSnapshotAction.Request updateModelSnapshotRequest = new UpdateModelSnapshotAction.Request(jobId, snapshotId); - updateModelSnapshotRequest.setDescription("Changed description"); - UpdateModelSnapshotAction.Response updateModelSnapshotResponse = - mlClient.updateModelSnapshot(updateModelSnapshotRequest).actionGet(); - assertThat(updateModelSnapshotResponse.getModel(), notNullValue()); - assertThat(updateModelSnapshotResponse.getModel().getDescription(), equalTo("Changed description")); - - // and delete the job - AcknowledgedResponse deleteJobResponse = mlClient.deleteJob(new DeleteJobAction.Request(jobId)).actionGet(); - assertThat(deleteJobResponse, notNullValue()); - assertThat(deleteJobResponse.isAcknowledged(), equalTo(true)); - } - - public void testMLTransportClient_ValidateActions() { - Client client = getClient(); - XPackClient xPackClient = new XPackClient(client); - MachineLearningClient mlClient = xPackClient.machineLearning(); - - Detector.Builder detector = new Detector.Builder(); - detector.setFunction("count"); - ValidateDetectorAction.Request validateDetectorRequest = new ValidateDetectorAction.Request(detector.build()); - AcknowledgedResponse validateDetectorResponse = mlClient.validateDetector(validateDetectorRequest).actionGet(); - assertThat(validateDetectorResponse.isAcknowledged(), equalTo(true)); - - Job.Builder job = createJob("ml-transport-client-it-validate-job"); - ValidateJobConfigAction.Request validateJobRequest = new ValidateJobConfigAction.Request(job.build(new Date())); - AcknowledgedResponse validateJobResponse = mlClient.validateJobConfig(validateJobRequest).actionGet(); - assertThat(validateJobResponse.isAcknowledged(), equalTo(true)); - } - - - public void testMLTransportClient_DateFeedActions() { - Client client = getClient(); - XPackClient xPackClient = new XPackClient(client); - 
MachineLearningClient mlClient = xPackClient.machineLearning(); - - String jobId = "ml-transport-client-it-datafeed-job"; - Job.Builder job = createJob(jobId); - - PutJobAction.Response putJobResponse = mlClient.putJob(new PutJobAction.Request(job)).actionGet(); - assertThat(putJobResponse, notNullValue()); - - String datafeedId = "ml-transport-client-it-datafeed"; - DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder(datafeedId, jobId); - String datafeedIndex = "ml-transport-client-test"; - String datatype = "type-bar"; - datafeed.setIndices(Collections.singletonList(datafeedIndex)); - - mlClient.putDatafeed(new PutDatafeedAction.Request(datafeed.build())).actionGet(); - - GetDatafeedsAction.Response getDatafeedResponse = mlClient.getDatafeeds(new GetDatafeedsAction.Request(datafeedId)).actionGet(); - assertThat(getDatafeedResponse.getResponse(), notNullValue()); - - // Open job before starting the datafeed - AcknowledgedResponse openJobResponse = mlClient.openJob(new OpenJobAction.Request(jobId)).actionGet(); - assertThat(openJobResponse.isAcknowledged(), equalTo(true)); - - // create the index for the data feed - Map source = new HashMap<>(); - source.put("time", new Date()); - source.put("message", "some message"); - client.prepareIndex(datafeedIndex, datatype).setSource(source).get(); - - StartDatafeedAction.Request startDatafeedRequest = new StartDatafeedAction.Request(datafeedId, new Date().getTime()); - AcknowledgedResponse startDataFeedResponse = mlClient.startDatafeed(startDatafeedRequest).actionGet(); - assertThat(startDataFeedResponse.isAcknowledged(), equalTo(true)); - - StopDatafeedAction.Response stopDataFeedResponse = mlClient.stopDatafeed(new StopDatafeedAction.Request(datafeedId)).actionGet(); - assertThat(stopDataFeedResponse.isStopped(), equalTo(true)); - } - - private Job.Builder createJob(String jobId) { - Job.Builder job = new Job.Builder(); - job.setId(jobId); - - List detectors = new ArrayList<>(); - Detector.Builder detector = new Detector.Builder(); - detector.setFunction("count"); - detectors.add(detector.build()); - - AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(detectors); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(10L)); - job.setAnalysisConfig(analysisConfig); - job.setDataDescription(new DataDescription.Builder()); - return job; - } -} diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle deleted file mode 100644 index d764ef897447a..0000000000000 --- a/x-pack/transport-client/build.gradle +++ /dev/null @@ -1,41 +0,0 @@ -apply plugin: 'elasticsearch.build' -apply plugin: 'nebula.maven-base-publish' -apply plugin: 'nebula.maven-scm' - -group = 'org.elasticsearch.client' -archivesBaseName = 'x-pack-transport' - -dependencies { - // this "api" dependency looks weird, but it is correct, as it contains - // all of x-pack for now, and transport client will be going away in the future. 
- compile "org.elasticsearch.plugin:x-pack-core:${version}" - compile "org.elasticsearch.client:transport:${version}" - testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" - testCompile "junit:junit:${versions.junit}" - testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" -} - -dependencyLicenses.enabled = false - -forbiddenApisTest { - // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to - // be pulled in - replaceSignatureFiles 'jdk-signatures', 'es-all-signatures' -} - -testingConventions { - naming.clear() - naming { - Tests { - baseClass 'com.carrotsearch.randomizedtesting.RandomizedTest' - } - } -} - -publishing { - publications { - nebula(MavenPublication) { - artifactId = archivesBaseName - } - } -} diff --git a/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java b/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java deleted file mode 100644 index cf4e5db92b00e..0000000000000 --- a/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.client; - -import io.netty.util.ThreadDeathWatcher; -import io.netty.util.concurrent.GlobalEventExecutor; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.client.PreBuiltTransportClient; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.security.SecurityField; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.TimeUnit; - -/** - * A builder to create an instance of {@link TransportClient} that pre-installs - * all of the plugins installed by the {@link PreBuiltTransportClient} and the - * {@link XPackPlugin} so that the client may be used with an x-pack enabled - * cluster. - * - * @deprecated {@link TransportClient} is deprecated in favour of the high-level REST client and will be removed in Elasticsearch 8.0 - */ -@SuppressWarnings({"unchecked","varargs"}) -@Deprecated -public class PreBuiltXPackTransportClient extends PreBuiltTransportClient { - - @SafeVarargs - public PreBuiltXPackTransportClient(Settings settings, Class... 
plugins) { - this(settings, Arrays.asList(plugins)); - } - - public PreBuiltXPackTransportClient(Settings settings, Collection> plugins) { - this(settings, plugins, null); - } - - public PreBuiltXPackTransportClient(Settings settings, Collection> plugins, - HostFailureListener hostFailureListener) { - super(settings, addPlugins(plugins, Collections.singletonList(XPackClientPlugin.class)), hostFailureListener); - } - - @Override - public void close() { - super.close(); - if (NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(SecurityField.NAME4)) { - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } -} diff --git a/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java b/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java deleted file mode 100644 index f9808ce54faac..0000000000000 --- a/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.client; - -import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.security.SecurityField; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/** - * Unit tests for the {@link PreBuiltXPackTransportClient} - */ -public class PreBuiltXPackTransportClientTests extends RandomizedTest { - - @Test - public void testPluginInstalled() { - try (TransportClient client = new PreBuiltXPackTransportClient(Settings.EMPTY)) { - Settings settings = client.settings(); - assertEquals(SecurityField.NAME4, NetworkModule.TRANSPORT_TYPE_SETTING.get(settings)); - } - } - -} \ No newline at end of file From e46622bb76338207f81e1426f8de1763b6c86453 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 23 May 2019 13:24:33 -0400 Subject: [PATCH 230/321] SQL: Add back the single node JDBC tests (#41960) Adds back single node tests that were accidentally removed. From 4bec333f7264d1f154bcb17eb524d40bab68d746 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 23 May 2019 13:27:34 -0400 Subject: [PATCH 231/321] Mute accounting circuit breaker check after test (#42448) If we close an engine while a refresh is happening, then we might leak refCount of some SegmentReaders. We need to skip the ram accounting circuit breaker check until we have a new Lucene snapshot which includes the fix for LUCENE-8809. This also adds a test to the engine but left it muted so we won't forget to reenable this check. 
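
For context, Lucene segment readers are reference counted: every searcher acquired from a ReferenceManager must be released on every code path, otherwise the readers (and the memory the accounting breaker tracks) are never freed. A minimal stand-alone sketch of that discipline follows, assuming a plain Lucene 8 setup; it is an illustration only, not code from this change, and the class name is invented:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.SearcherManager;
    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;

    public class RefCountSketch {
        public static void main(String[] args) throws Exception {
            try (Directory dir = new ByteBuffersDirectory();
                 IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
                writer.addDocument(new Document());
                SearcherManager manager = new SearcherManager(writer, null);
                manager.maybeRefresh();                     // opens readers over the new segment
                IndexSearcher searcher = manager.acquire(); // increments the SegmentReaders' refCount
                try {
                    System.out.println(searcher.count(new MatchAllDocsQuery()));
                } finally {
                    manager.release(searcher);              // skipping this on any path leaks the refCount
                }
                manager.close();
            }
        }
    }

A refresh that races with an engine close can drop such a reference, which is why the breaker check below stays muted until the LUCENE-8809 fix ships.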
Closes #30290 --- .../index/engine/InternalEngineTests.java | 42 +++++++++++++++++++ .../index/engine/EngineTestCase.java | 3 ++ .../test/InternalTestCluster.java | 21 ++++++---- 3 files changed, 57 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index db9de3765b1e7..b213da097ce5e 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -154,6 +155,7 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -5636,4 +5638,44 @@ public void testMaxSeqNoInCommitUserData() throws Exception { rollTranslog.join(); assertMaxSeqNoInCommitUserData(engine); } + + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8809") + public void testRefreshAndFailEngineConcurrently() throws Exception { + AtomicBoolean stopped = new AtomicBoolean(); + Semaphore indexedDocs = new Semaphore(0); + Thread indexer = new Thread(() -> { + while (stopped.get() == false) { + String id = Integer.toString(randomIntBetween(1, 100)); + try { + engine.index(indexForDoc(createParsedDoc(id, null))); + indexedDocs.release(); + } catch (IOException e) { + throw new AssertionError(e); + } catch (AlreadyClosedException e) { + return; + } + } + }); + + Thread refresher = new Thread(() -> { + while (stopped.get() == false) { + try { + engine.refresh("test", randomFrom(Engine.SearcherScope.values()), randomBoolean()); + } catch (AlreadyClosedException e) { + return; + } + } + }); + indexer.start(); + refresher.start(); + indexedDocs.acquire(randomIntBetween(1, 100)); + try { + engine.failEngine("test", new IOException("simulated error")); + } finally { + stopped.set(true); + indexer.join(); + refresher.join(); + } + assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index afa319af7e1cf..e25217eaccc9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -269,6 +270,8 @@ public void tearDown() throws Exception { assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); 
assertMaxSeqNoInCommitUserData(replicaEngine); } + assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + assertThat(replicaEngine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); } finally { IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 2e88a018e5a0d..cc071df9769ca 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -2334,15 +2334,18 @@ public void ensureEstimatedStats() { final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node); CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA); assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L)); - try { - assertBusy(() -> { - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", - acctBreaker.getUsed(), equalTo(0L)); - }); - } catch (Exception e) { - throw new AssertionError("Exception during check for accounting breaker reset to 0", e); - } + + // Mute this assertion until we have a new Lucene snapshot with https://issues.apache.org/jira/browse/LUCENE-8809. + // try { + // assertBusy(() -> { + // CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); + // assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", + // acctBreaker.getUsed(), equalTo(0L)); + // }); + // } catch (Exception e) { + // throw new AssertionError("Exception during check for accounting breaker reset to 0", e); + // } + // Anything that uses transport or HTTP can increase the // request breaker (because they use bigarrays), because of // that the breaker can sometimes be incremented from ping From 8177e7102d9438c0b6d2fe55a351089a5e5b98a2 Mon Sep 17 00:00:00 2001 From: sandmannn Date: Thu, 23 May 2019 19:38:11 +0200 Subject: [PATCH 232/321] Split document and metadata fields in GetResult (#38373) This commit makes creators of GetField split the fields into document fields and metadata fields. It is part of a larger refactoring that aims to remove the calls to static methods of MapperService related to metadata fields, as discussed in #24422.
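
Concretely, a GetResult now carries two maps and merges them on demand in getFields(). A minimal stand-alone sketch of the partitioning idea follows, with a hard-coded name set standing in for MapperService.isMetadataField (an assumption for illustration; the real split is splitFieldsByMetadata in the GetResult diff below, and metadata fields are registered by mappers rather than listed by hand):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class SplitFieldsSketch {
        // Illustrative stand-in for MapperService.isMetadataField.
        private static final Set<String> METADATA = new HashSet<>(Arrays.asList("_routing", "_ignored"));

        public static void main(String[] args) {
            Map<String, String> fields = new HashMap<>();
            fields.put("_routing", "shard-key");
            fields.put("title", "hello");

            Map<String, String> documentFields = new HashMap<>();
            Map<String, String> metadataFields = new HashMap<>();
            for (Map.Entry<String, String> entry : fields.entrySet()) {
                // route each field into the document or metadata bucket
                Map<String, String> target = METADATA.contains(entry.getKey()) ? metadataFields : documentFields;
                target.put(entry.getKey(), entry.getValue());
            }
            System.out.println("document=" + documentFields + " metadata=" + metadataFields);
        }
    }

Consumers that only want user fields can then read the document map directly instead of filtering the merged view.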
--- build.gradle | 4 +- .../PercolateQueryBuilderTests.java | 4 +- .../action/update/UpdateHelper.java | 2 +- .../action/update/UpdateResponse.java | 3 +- .../elasticsearch/index/get/GetResult.java | 141 +++++++++++------- .../index/get/ShardGetService.java | 18 ++- .../action/explain/ExplainResponseTests.java | 4 +- .../action/get/GetResponseTests.java | 10 +- .../action/get/MultiGetResponseTests.java | 2 +- .../action/update/UpdateRequestTests.java | 20 +-- .../action/update/UpdateResponseTests.java | 2 +- .../index/get/GetResultTests.java | 48 +++--- .../query/GeoShapeQueryBuilderTests.java | 2 +- .../index/query/TermsQueryBuilderTests.java | 2 +- .../document/RestGetSourceActionTests.java | 6 +- .../authc/AuthenticationServiceTests.java | 10 +- .../authc/esnative/NativeUsersStoreTests.java | 3 + .../store/NativePrivilegeStoreTests.java | 6 +- .../xpack/security/test/SecurityMocks.java | 3 +- .../execution/ExecutionServiceTests.java | 6 +- .../ack/TransportAckWatchActionTests.java | 2 +- 21 files changed, 184 insertions(+), 114 deletions(-) diff --git a/build.gradle b/build.gradle index 7de02b814da86..037d3242dc4b7 100644 --- a/build.gradle +++ b/build.gradle @@ -162,8 +162,8 @@ task verifyVersions { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/38373" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index e697c2f66eed8..6053a92b54a20 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -153,12 +153,12 @@ protected GetResponse executeGet(GetRequest getRequest) { if (indexedDocumentExists) { return new GetResponse( new GetResult(indexedDocumentIndex, MapperService.SINGLE_MAPPING_NAME, indexedDocumentId, 0, 1, 0L, true, - documentSource.iterator().next(), Collections.emptyMap()) + documentSource.iterator().next(), Collections.emptyMap(), Collections.emptyMap()) ); } else { return new GetResponse( new GetResult(indexedDocumentIndex, MapperService.SINGLE_MAPPING_NAME, indexedDocumentId, UNASSIGNED_SEQ_NO, 0, -1, - false, null, Collections.emptyMap()) + false, null, Collections.emptyMap(), Collections.emptyMap()) ); } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 54cd38aa0b960..c6e45af0e6a89 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -306,7 +306,7 @@ public static GetResult extractGetResult(final UpdateRequest request, String con // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType) return new GetResult(concreteIndex, request.type(), request.id(), seqNo, primaryTerm, version, true, 
sourceFilteredAsBytes, - Collections.emptyMap()); + Collections.emptyMap(), Collections.emptyMap()); } public static class Result { diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java index 03d721b26fe08..f3afec4f25b29 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateResponse.java @@ -164,7 +164,8 @@ public UpdateResponse build() { if (getResult != null) { update.setGetResult(new GetResult(update.getIndex(), update.getType(), update.getId(), getResult.getSeqNo(), getResult.getPrimaryTerm(), update.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), getResult.getFields())); + getResult.isExists(), getResult.internalSourceRef(), getResult.getDocumentFields(), + getResult.getMetadataFields())); } update.setForcedRefresh(forcedRefresh); return update; diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 5769b659e40b3..ffaa42ce0ad21 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.get; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; @@ -36,11 +37,9 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Objects; @@ -67,7 +66,8 @@ public class GetResult implements Streamable, Iterable, ToXConten private long seqNo; private long primaryTerm; private boolean exists; - private Map fields; + private Map documentFields; + private Map metaFields; private Map sourceAsMap; private BytesReference source; private byte[] sourceAsBytes; @@ -76,7 +76,7 @@ public class GetResult implements Streamable, Iterable, ToXConten } public GetResult(String index, String type, String id, long seqNo, long primaryTerm, long version, boolean exists, - BytesReference source, Map fields) { + BytesReference source, Map documentFields, Map metaFields) { this.index = index; this.type = type; this.id = id; @@ -89,9 +89,13 @@ public GetResult(String index, String type, String id, long seqNo, long primaryT this.version = version; this.exists = exists; this.source = source; - this.fields = fields; - if (this.fields == null) { - this.fields = emptyMap(); + this.documentFields = documentFields; + if (this.documentFields == null) { + this.documentFields = emptyMap(); + } + this.metaFields = metaFields; + if (this.metaFields == null) { + this.metaFields = emptyMap(); } } @@ -222,20 +226,31 @@ public Map getSource() { return sourceAsMap(); } + + public Map getMetadataFields() { + return metaFields; + } + + public Map getDocumentFields() { + return documentFields; + } + public Map getFields() { + Map fields = new HashMap<>(); + fields.putAll(metaFields); + fields.putAll(documentFields); return fields; } public DocumentField field(String name) { - return fields.get(name); + return getFields().get(name); } @Override public Iterator iterator() { - if (fields == null) { - return 
Collections.emptyIterator(); - } - return fields.values().iterator(); + // need to join the fields and metadata fields + Map allFields = this.getFields(); + return allFields.values().iterator(); } public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException { @@ -244,21 +259,7 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params builder.field(_PRIMARY_TERM, primaryTerm); } - List metaFields = new ArrayList<>(); - List otherFields = new ArrayList<>(); - if (fields != null && !fields.isEmpty()) { - for (DocumentField field : fields.values()) { - if (field.getValues().isEmpty()) { - continue; - } - if (field.isMetadataField()) { - metaFields.add(field); - } else { - otherFields.add(field); - } - } - } - for (DocumentField field : metaFields) { + for (DocumentField field : metaFields.values()) { // TODO: can we avoid having an exception here? if (field.getName().equals(IgnoredFieldMapper.NAME)) { builder.field(field.getName(), field.getValues()); @@ -273,9 +274,9 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params XContentHelper.writeRawField(SourceFieldMapper.NAME, source, builder, params); } - if (!otherFields.isEmpty()) { + if (!documentFields.isEmpty()) { builder.startObject(FIELDS); - for (DocumentField field : otherFields) { + for (DocumentField field : documentFields.values()) { field.toXContent(builder, params); } builder.endObject(); @@ -317,7 +318,8 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index long primaryTerm = UNASSIGNED_PRIMARY_TERM; Boolean found = null; BytesReference source = null; - Map fields = new HashMap<>(); + Map documentFields = new HashMap<>(); + Map metaFields = new HashMap<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -337,7 +339,8 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index } else if (FOUND.equals(currentFieldName)) { found = parser.booleanValue(); } else { - fields.put(currentFieldName, new DocumentField(currentFieldName, Collections.singletonList(parser.objectText()))); + metaFields.put(currentFieldName, new DocumentField(currentFieldName, + Collections.singletonList(parser.objectText()))); } } else if (token == XContentParser.Token.START_OBJECT) { if (SourceFieldMapper.NAME.equals(currentFieldName)) { @@ -350,20 +353,20 @@ public static GetResult fromXContentEmbedded(XContentParser parser, String index } else if (FIELDS.equals(currentFieldName)) { while(parser.nextToken() != XContentParser.Token.END_OBJECT) { DocumentField getField = DocumentField.fromXContent(parser); - fields.put(getField.getName(), getField); + documentFields.put(getField.getName(), getField); } } else { parser.skipChildren(); // skip potential inner objects for forward compatibility } } else if (token == XContentParser.Token.START_ARRAY) { if (IgnoredFieldMapper.NAME.equals(currentFieldName)) { - fields.put(currentFieldName, new DocumentField(currentFieldName, parser.list())); + metaFields.put(currentFieldName, new DocumentField(currentFieldName, parser.list())); } else { parser.skipChildren(); // skip potential inner arrays for forward compatibility } } } - return new GetResult(index, type, id, seqNo, primaryTerm, version, found, source, fields); + return new GetResult(index, type, id, seqNo, primaryTerm, version, found, source, documentFields, metaFields); } public static GetResult 
fromXContent(XContentParser parser) throws IOException { @@ -379,6 +382,35 @@ public static GetResult readGetResult(StreamInput in) throws IOException { return result; } + private Map readFields(StreamInput in) throws IOException { + Map fields = null; + int size = in.readVInt(); + if (size == 0) { + fields = new HashMap<>(); + } else { + fields = new HashMap<>(size); + for (int i = 0; i < size; i++) { + DocumentField field = DocumentField.readDocumentField(in); + fields.put(field.getName(), field); + } + } + return fields; + } + + static void splitFieldsByMetadata(Map fields, Map outOther, + Map outMetadata) { + if (fields == null) { + return; + } + for (Map.Entry fieldEntry: fields.entrySet()) { + if (fieldEntry.getValue().isMetadataField()) { + outMetadata.put(fieldEntry.getKey(), fieldEntry.getValue()); + } else { + outOther.put(fieldEntry.getKey(), fieldEntry.getValue()); + } + } + } + @Override public void readFrom(StreamInput in) throws IOException { index = in.readString(); @@ -393,15 +425,14 @@ public void readFrom(StreamInput in) throws IOException { if (source.length() == 0) { source = null; } - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + documentFields = readFields(in); + metaFields = readFields(in); } else { - fields = new HashMap<>(size); - for (int i = 0; i < size; i++) { - DocumentField field = DocumentField.readDocumentField(in); - fields.put(field.getName(), field); - } + Map fields = readFields(in); + documentFields = new HashMap<>(); + metaFields = new HashMap<>(); + splitFieldsByMetadata(fields, documentFields, metaFields); } } } @@ -417,13 +448,22 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(exists); if (exists) { out.writeBytesReference(source); - if (fields == null) { - out.writeVInt(0); + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + writeFields(out, documentFields); + writeFields(out, metaFields); } else { - out.writeVInt(fields.size()); - for (DocumentField field : fields.values()) { - field.writeTo(out); - } + writeFields(out, this.getFields()); + } + } + } + + private void writeFields(StreamOutput out, Map fields) throws IOException { + if (fields == null) { + out.writeVInt(0); + } else { + out.writeVInt(fields.size()); + for (DocumentField field : fields.values()) { + field.writeTo(out); } } } @@ -444,13 +484,14 @@ public boolean equals(Object o) { Objects.equals(index, getResult.index) && Objects.equals(type, getResult.type) && Objects.equals(id, getResult.id) && - Objects.equals(fields, getResult.fields) && + Objects.equals(documentFields, getResult.documentFields) && + Objects.equals(metaFields, getResult.metaFields) && Objects.equals(sourceAsMap(), getResult.sourceAsMap()); } @Override public int hashCode() { - return Objects.hash(version, seqNo, primaryTerm, exists, index, type, id, fields, sourceAsMap()); + return Objects.hash(version, seqNo, primaryTerm, exists, index, type, id, documentFields, metaFields, sourceAsMap()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 3c85fe40c5ba7..f77fc072c7062 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -118,7 +118,7 @@ public GetResult getForUpdate(String type, String id, long ifSeqNo, long ifPrima public GetResult get(Engine.GetResult engineGetResult, String 
id, String type, String[] fields, FetchSourceContext fetchSourceContext) { if (!engineGetResult.exists()) { - return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null); + return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); } currentMetric.inc(); @@ -174,7 +174,7 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea } if (get == null || get.exists() == false) { - return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null); + return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null, null); } try { @@ -187,7 +187,8 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) { - Map fields = null; + Map documentFields = null; + Map metaDataFields = null; BytesReference source = null; DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext); @@ -201,9 +202,14 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] if (!fieldVisitor.fields().isEmpty()) { fieldVisitor.postProcess(mapperService); - fields = new HashMap<>(fieldVisitor.fields().size()); + documentFields = new HashMap<>(); + metaDataFields = new HashMap<>(); for (Map.Entry> entry : fieldVisitor.fields().entrySet()) { - fields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); + if (MapperService.isMetadataField(entry.getKey())) { + metaDataFields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); + } else { + documentFields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); + } } } } @@ -240,7 +246,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] } return new GetResult(shardId.getIndexName(), type, id, get.docIdAndVersion().seqNo, get.docIdAndVersion().primaryTerm, - get.version(), get.exists(), source, fields); + get.version(), get.exists(), source, documentFields, metaDataFields); } private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceContext fetchSourceContext) { diff --git a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java index 9f1ee08844b66..1b3b9a8afa9f4 100644 --- a/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/explain/ExplainResponseTests.java @@ -70,7 +70,7 @@ protected ExplainResponse createTestInstance() { 0, 1, randomNonNegativeLong(), true, RandomObjects.randomSource(random()), - singletonMap(fieldName, new DocumentField(fieldName, values))); + singletonMap(fieldName, new DocumentField(fieldName, values)), null); return new ExplainResponse(index, type, id, exist, explanation, getResult); } @@ -87,7 +87,7 @@ public void testToXContent() throws IOException { Explanation explanation = Explanation.match(1.0f, "description", Collections.emptySet()); GetResult getResult = new GetResult(null, null, null, 0, 1, -1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), 
singletonMap("field1", new DocumentField("field1", - singletonList("value1")))); + singletonList("value1"))), null); ExplainResponse response = new ExplainResponse(index, type, id, exist, explanation, getResult); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); diff --git a/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java index a215a47b89466..359a394b33806 100644 --- a/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java @@ -94,14 +94,15 @@ public void testToXContent() { { GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 0, 1, 1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), Collections.singletonMap("field1", new DocumentField("field1", - Collections.singletonList("value1"))))); + Collections.singletonList("value1"))), null)); String output = Strings.toString(getResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", output); } { - GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null)); + GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, + 0, 1, false, null, null, null)); String output = Strings.toString(getResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output); } @@ -110,7 +111,7 @@ public void testToXContent() { public void testToString() { GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 0, 1, 1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), - Collections.singletonMap("field1", new DocumentField("field1", Collections.singletonList("value1"))))); + Collections.singletonMap("field1", new DocumentField("field1", Collections.singletonList("value1"))), null)); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", getResponse.toString()); @@ -123,7 +124,8 @@ public void testEqualsAndHashcode() { public void testFromXContentThrowsParsingException() throws IOException { GetResponse getResponse = - new GetResponse(new GetResult(null, null, null, UNASSIGNED_SEQ_NO, 0, randomIntBetween(1, 5), randomBoolean(), null, null)); + new GetResponse(new GetResult(null, null, null, UNASSIGNED_SEQ_NO, 0, randomIntBetween(1, 5), + randomBoolean(), null, null, null)); XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toShuffledXContent(getResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java index 101313f3001c6..8182e49049052 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java @@ -71,7 +71,7 @@ private static MultiGetResponse createTestInstance() { if (randomBoolean()) { items[i] = 
diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java
index 101313f3001c6..8182e49049052 100644
--- a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java
@@ -71,7 +71,7 @@ private static MultiGetResponse createTestInstance() {
             if (randomBoolean()) {
                 items[i] = new MultiGetItemResponse(new GetResponse(new GetResult(
                     randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), 0, 1, randomNonNegativeLong(),
-                    true, null, null
+                    true, null, null, null
                 )), null);
             } else {
                 items[i] = new MultiGetItemResponse(null, new MultiGetResponse.Failure(randomAlphaOfLength(4),
diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
index 642d14e2258cb..6549c3a8df5e1 100644
--- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
@@ -359,7 +359,7 @@ public void testNowInScript() throws IOException {
             .scriptedUpsert(true);
         long nowInMillis = randomNonNegativeLong();
         // We simulate that the document is not existing yet
-        GetResult getResult = new GetResult("test", "type1", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null);
+        GetResult getResult = new GetResult("test", "type1", "2", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null);
         UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> nowInMillis);
         Streamable action = result.action();
         assertThat(action, instanceOf(IndexRequest.class));
@@ -372,7 +372,7 @@ public void testNowInScript() throws IOException {
             .script(mockInlineScript("ctx._timestamp = ctx._now"))
             .scriptedUpsert(true);
         // We simulate that the document is not existing yet
-        GetResult getResult = new GetResult("test", "type1", "2", 0, 1, 0, true, new BytesArray("{}"), null);
+        GetResult getResult = new GetResult("test", "type1", "2", 0, 1, 0, true, new BytesArray("{}"), null, null);
         UpdateHelper.Result result = updateHelper.prepare(new ShardId("test", "_na_", 0), updateRequest, getResult, () -> 42L);
         Streamable action = result.action();
         assertThat(action, instanceOf(IndexRequest.class));
@@ -381,7 +381,7 @@ public void testNowInScript() throws IOException {
 
     public void testIndexTimeout() {
         final GetResult getResult =
-            new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null);
+            new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null);
         final UpdateRequest updateRequest =
             new UpdateRequest("test", "type", "1")
                 .script(mockInlineScript("return"))
@@ -391,7 +391,7 @@ public void testIndexTimeout() {
 
     public void testDeleteTimeout() {
         final GetResult getResult =
-            new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null);
+            new GetResult("test", "type", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null);
         final UpdateRequest updateRequest =
             new UpdateRequest("test", "type", "1")
                 .script(mockInlineScript("ctx.op = delete"))
@@ -402,7 +402,7 @@ public void testUpsertTimeout() throws IOException {
         final boolean exists = randomBoolean();
         final BytesReference source = exists ? new BytesArray("{\"f\":\"v\"}") : null;
-        final GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, exists, source, null);
+        final GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, exists, source, null, null);
         final XContentBuilder sourceBuilder = jsonBuilder();
         sourceBuilder.startObject();
         {
@@ -546,7 +546,7 @@ public void testValidate() {
     }
 
     public void testRoutingExtraction() throws Exception {
-        GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, false, null, null);
+        GetResult getResult = new GetResult("test", "type", "1", UNASSIGNED_SEQ_NO, 0, 0, false, null, null, null);
         IndexRequest indexRequest = new IndexRequest("test", "type", "1");
 
         // There is no routing and parent because the document doesn't exist
@@ -556,7 +556,7 @@ public void testRoutingExtraction() throws Exception {
         assertNull(UpdateHelper.calculateRouting(getResult, indexRequest));
 
         // Doc exists but has no source or fields
-        getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, null);
+        getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, null, null);
 
         // There is no routing and parent on either request
         assertNull(UpdateHelper.calculateRouting(getResult, indexRequest));
@@ -565,7 +565,7 @@ public void testRoutingExtraction() throws Exception {
         fields.put("_routing", new DocumentField("_routing", Collections.singletonList("routing1")));
 
         // Doc exists and has the parent and routing fields
-        getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, fields);
+        getResult = new GetResult("test", "type", "1", 0, 1, 0, true, null, fields, null);
 
         // Use the get result parent and routing
         assertThat(UpdateHelper.calculateRouting(getResult, indexRequest), equalTo("routing1"));
@@ -575,7 +575,7 @@ public void testNoopDetection() throws Exception {
         ShardId shardId = new ShardId("test", "", 0);
         GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true,
             new BytesArray("{\"body\": \"foo\"}"),
-            null);
+            null, null);
 
         UpdateRequest request = new UpdateRequest("test", "type1", "1").fromXContent(
             createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}")));
@@ -606,7 +606,7 @@ public void testUpdateScript() throws Exception {
         ShardId shardId = new ShardId("test", "", 0);
         GetResult getResult = new GetResult("test", "type", "1", 0, 1, 0, true,
             new BytesArray("{\"body\": \"bar\"}"),
-            null);
+            null, null);
 
         UpdateRequest request = new UpdateRequest("test", "type1", "1")
             .script(mockInlineScript("ctx._source.body = \"foo\""));
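The `testRoutingExtraction` hunks above feed a metadata-style `_routing` field through `UpdateHelper.calculateRouting`. A minimal sketch of that kind of lookup, assuming a fields map shaped like the one the test builds (the helper name is illustrative, not the patch's):

```
import org.elasticsearch.common.document.DocumentField;

import java.util.Map;

class RoutingLookup {

    /** Returns the `_routing` value carried by a get result's fields map, or null when absent. */
    static String routingFrom(Map<String, DocumentField> fields) {
        if (fields == null || fields.containsKey("_routing") == false) {
            return null;
        }
        return fields.get("_routing").getValue();
    }
}
```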
diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java
index 8ec0423b40699..babad0276917d 100644
--- a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java
@@ -74,7 +74,7 @@ public void testToXContent() throws IOException {
         UpdateResponse updateResponse = new UpdateResponse(new ReplicationResponse.ShardInfo(3, 2),
             new ShardId("books", "books_uuid", 2), "book", "1", 7, 17, 2, UPDATED);
-        updateResponse.setGetResult(new GetResult("books", "book", "1",0, 1, 2, true, source, fields));
+        updateResponse.setGetResult(new GetResult("books", "book", "1",0, 1, 2, true, source, fields, null));
 
         String output = Strings.toString(updateResponse);
         assertEquals("{\"_index\":\"books\",\"_type\":\"book\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\"," +
diff --git a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
index ad8673d13ea6b..5758fb5bcb971 100644
--- a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
+++ b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
@@ -76,14 +76,15 @@ public void testToXContent() throws IOException {
         {
             GetResult getResult = new GetResult("index", "type", "id", 0, 1, 1, true, new BytesArray("{ \"field1\" : " +
                 "\"value1\", \"field2\":\"value2\"}"), singletonMap("field1", new DocumentField("field1",
-                singletonList("value1"))));
+                singletonList("value1"))), singletonMap("field1", new DocumentField("metafield",
+                singletonList("metavalue"))));
             String output = Strings.toString(getResult);
             assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," +
-                "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}",
-                output);
+                "\"metafield\":\"metavalue\",\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"}," +
+                "\"fields\":{\"field1\":[\"value1\"]}}", output);
         }
         {
-            GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null);
+            GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null);
             String output = Strings.toString(getResult);
             assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output);
         }
@@ -96,7 +97,8 @@ public void testToAndFromXContentEmbedded() throws Exception {
         // We don't expect to retrieve the index/type/id of the GetResult because they are not rendered
         // by the toXContentEmbedded method.
         GetResult expectedGetResult = new GetResult(null, null, null, tuple.v2().getSeqNo(), tuple.v2().getPrimaryTerm(), -1,
-            tuple.v2().isExists(), tuple.v2().sourceRef(), tuple.v2().getFields());
+            tuple.v2().isExists(), tuple.v2().sourceRef(), tuple.v2().getDocumentFields(),
+            tuple.v2().getMetadataFields());
 
         boolean humanReadable = randomBoolean();
         BytesReference originalBytes = toXContentEmbedded(getResult, xContentType, humanReadable);
@@ -122,7 +124,7 @@ public void testToXContentEmbedded() throws IOException {
         fields.put("baz", new DocumentField("baz", Arrays.asList("baz_0", "baz_1")));
 
         GetResult getResult = new GetResult("index", "type", "id", 0, 1, 2, true,
-            new BytesArray("{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}"), fields);
+            new BytesArray("{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}"), fields, null);
 
         BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false);
         assertEquals("{\"_seq_no\":0,\"_primary_term\":1,\"found\":true,\"_source\":{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}," +
@@ -130,7 +132,7 @@ public void testToXContentEmbedded() throws IOException {
     }
 
     public void testToXContentEmbeddedNotFound() throws IOException {
-        GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null);
+        GetResult getResult = new GetResult("index", "type", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null);
 
         BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false);
         assertEquals("{\"found\":false}", originalBytes.utf8ToString());
@@ -154,33 +156,33 @@ public void testEqualsAndHashcode() {
 
     public static GetResult copyGetResult(GetResult getResult) {
         return new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(),
-            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields());
+            getResult.isExists(), getResult.internalSourceRef(), getResult.getDocumentFields(), getResult.getMetadataFields());
     }
 
     public static GetResult mutateGetResult(GetResult getResult) {
         List<Supplier<GetResult>> mutations = new ArrayList<>();
         mutations.add(() -> new GetResult(randomUnicodeOfLength(15), getResult.getType(), getResult.getId(),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(),
-            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields()));
+            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null));
         mutations.add(() -> new GetResult(getResult.getIndex(), randomUnicodeOfLength(15), getResult.getId(),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(),
-            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields()));
+            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null));
         mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), randomUnicodeOfLength(15),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(),
-            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields()));
+            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null));
         mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), randomNonNegativeLong(),
-            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields()));
+            getResult.isExists(), getResult.internalSourceRef(), getResult.getFields(), null));
         mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(),
             getResult.isExists() ? UNASSIGNED_SEQ_NO : getResult.getSeqNo(),
             getResult.isExists() ? 0 : getResult.getPrimaryTerm(),
-            getResult.getVersion(), getResult.isExists() == false, getResult.internalSourceRef(), getResult.getFields()));
+            getResult.getVersion(), getResult.isExists() == false, getResult.internalSourceRef(), getResult.getFields(), null));
         mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(), getResult.isExists(),
-            RandomObjects.randomSource(random()), getResult.getFields()));
+            RandomObjects.randomSource(random()), getResult.getFields(), null));
         mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(),
             getResult.getSeqNo(), getResult.getPrimaryTerm(), getResult.getVersion(),
-            getResult.isExists(), getResult.internalSourceRef(), randomDocumentFields(XContentType.JSON).v1()));
+            getResult.isExists(), getResult.internalSourceRef(), randomDocumentFields(XContentType.JSON).v1(), null));
         return randomFrom(mutations).get();
     }
 
@@ -195,6 +197,8 @@ public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentT
         BytesReference source = null;
         Map<String, DocumentField> fields = null;
         Map<String, DocumentField> expectedFields = null;
+        Map<String, DocumentField> metaFields = null;
+        Map<String, DocumentField> expectedMetaFields = null;
         if (frequently()) {
             version = randomNonNegativeLong();
             seqNo = randomNonNegativeLong();
@@ -205,8 +209,13 @@ public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentT
             }
             if (randomBoolean()) {
                 Tuple<Map<String, DocumentField>, Map<String, DocumentField>> tuple = randomDocumentFields(xContentType);
-                fields = tuple.v1();
-                expectedFields = tuple.v2();
+                fields = new HashMap<>();
+                metaFields = new HashMap<>();
+                GetResult.splitFieldsByMetadata(tuple.v1(), fields, metaFields);
+
+                expectedFields = new HashMap<>();
+                expectedMetaFields = new HashMap<>();
+                GetResult.splitFieldsByMetadata(tuple.v2(), expectedFields, expectedMetaFields);
             }
         } else {
             seqNo = UNASSIGNED_SEQ_NO;
@@ -214,8 +223,9 @@ public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentT
             version = -1;
             exists = false;
         }
-        GetResult getResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, fields);
-        GetResult expectedGetResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, expectedFields);
+        GetResult getResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source, fields, metaFields);
+        GetResult expectedGetResult = new GetResult(index, type, id, seqNo, primaryTerm, version, exists, source,
+            expectedFields, expectedMetaFields);
 
         return Tuple.tuple(getResult, expectedGetResult);
     }
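The expected strings asserted in `testToXContent` above are easier to read pretty-printed. For the first `GetResult` (document field `field1`, metadata field rendered as `metafield`), the output encodes the new rendering contract: metadata fields are hoisted to the root of the object while plain fields stay under `fields`. Roughly:

```
{
  "_index" : "index",
  "_type" : "type",
  "_id" : "id",
  "_version" : 1,
  "_seq_no" : 0,
  "_primary_term" : 1,
  "metafield" : "metavalue",
  "found" : true,
  "_source" : { "field1" : "value1", "field2" : "value2" },
  "fields" : { "field1" : [ "value1" ] }
}
```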
diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
index 4851387b1a497..62cc7a43cd2c2 100644
--- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
@@ -151,7 +151,7 @@ protected GetResponse executeGet(GetRequest getRequest) {
             throw new ElasticsearchException("boom", ex);
         }
         return new GetResponse(new GetResult(indexedShapeIndex, indexedType, indexedShapeId, 0, 1, 0, true, new BytesArray(json),
-            null));
+            null, null));
     }
 
     @After
diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
index a9080c688f64f..40e32b91d7e55 100644
--- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java
@@ -210,7 +210,7 @@ public GetResponse executeGet(GetRequest getRequest) {
             throw new ElasticsearchException("boom", ex);
         }
         return new GetResponse(new GetResult(getRequest.index(), getRequest.type(), getRequest.id(), 0, 1, 0, true,
-            new BytesArray(json), null));
+            new BytesArray(json), null, null));
     }
 
     public void testNumeric() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java
index f012c1393c9ad..53d78c7d03e84 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestGetSourceActionTests.java
@@ -96,7 +96,7 @@ public void testTypeParameter() {
     public void testRestGetSourceAction() throws Exception {
         final BytesReference source = new BytesArray("{\"foo\": \"bar\"}");
         final GetResponse response =
-            new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, source, emptyMap()));
+            new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, source, emptyMap(), null));
 
         final RestResponse restResponse = listener.buildResponse(response);
 
@@ -107,7 +107,7 @@ public void testRestGetSourceAction() throws Exception {
 
     public void testRestGetSourceActionWithMissingDocument() {
         final GetResponse response =
-            new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap()));
+            new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap(), null));
 
         final ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class,
             () -> listener.buildResponse(response));
@@ -116,7 +116,7 @@ public void testRestGetSourceActionWithMissingDocument() {
 
     public void testRestGetSourceActionWithMissingDocumentSource() {
         final GetResponse response =
-            new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, null, emptyMap()));
+            new GetResponse(new GetResult("index1", "_doc", "1", UNASSIGNED_SEQ_NO, 0, -1, true, null, emptyMap(), null));
 
         final ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class,
             () -> listener.buildResponse(response));
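Several of the security and watcher test diffs below wire canned `GetResult`s into a mocked client. They all rely on the same Mockito `doAnswer` idiom; distilled, it looks like this (the helper class is mine, not the patch's):

```
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;

class GetResponseMocking {

    /** Makes every client.get(request, listener) call answer with the supplied canned response. */
    @SuppressWarnings("unchecked")
    static void mockGet(Client client, GetResponse canned) {
        doAnswer(invocation -> {
            ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocation.getArguments()[1];
            listener.onResponse(canned);
            return null;
        }).when(client).get(any(GetRequest.class), any(ActionListener.class));
    }
}
```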
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
index 67ce5ce2b27af..d8a7d9447946b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
@@ -1249,11 +1249,12 @@ public void testApiKeyAuth() {
                 creatorMap.put("realm", "auth realm");
                 source.put("creator", creatorMap);
                 GetResponse getResponse = new GetResponse(new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true,
-                    BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap()));
+                    BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap(), Collections.emptyMap()));
                 listener.onResponse(getResponse);
             } else {
                 listener.onResponse(new GetResponse(new GetResult(request.index(), request.type(), request.id(),
-                    SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null, Collections.emptyMap())));
+                    SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null,
+                    Collections.emptyMap(), Collections.emptyMap())));
             }
             return Void.TYPE;
         }).when(client).get(any(GetRequest.class), any(ActionListener.class));
@@ -1288,11 +1289,12 @@ public void testExpiredApiKey() {
                 creatorMap.put("realm", "auth realm");
                 source.put("creator", creatorMap);
                 GetResponse getResponse = new GetResponse(new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true,
-                    BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap()));
+                    BytesReference.bytes(JsonXContent.contentBuilder().map(source)), Collections.emptyMap(), Collections.emptyMap()));
                 listener.onResponse(getResponse);
             } else {
                 listener.onResponse(new GetResponse(new GetResult(request.index(), request.type(), request.id(),
-                    SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null, Collections.emptyMap())));
+                    SequenceNumbers.UNASSIGNED_SEQ_NO, 1, -1L, false, null,
+                    Collections.emptyMap(), Collections.emptyMap())));
             }
             return Void.TYPE;
         }).when(client).get(any(GetRequest.class), any(ActionListener.class));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java
index 4cbf5307d3ed6..53eb3fc0bdbc4 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java
@@ -118,6 +118,7 @@ public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception {
                 0, 1, 1L,
                 true,
                 BytesReference.bytes(jsonBuilder().map(values)),
+                Collections.emptyMap(),
                 Collections.emptyMap());
 
         final PlainActionFuture future = new PlainActionFuture<>();
@@ -187,6 +188,7 @@ public void testVerifyNonExistentUser() throws Exception {
                 UNASSIGNED_SEQ_NO, 0, 1L,
                 false,
                 null,
+                Collections.emptyMap(),
                 Collections.emptyMap());
         actionRespond(GetRequest.class, new GetResponse(getResult));
@@ -229,6 +231,7 @@ private void respondToGetUserRequest(String username, SecureString password, Str
                 0, 1, 1L,
                 true,
                 source,
+                Collections.emptyMap(),
                 Collections.emptyMap());
         actionRespond(GetRequest.class, new GetResponse(getResult));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java
index 7f7a262131bb2..d50663b9d7cab 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java
@@ -132,7 +132,8 @@ public void testGetSinglePrivilegeByName() throws Exception {
         final String docSource = Strings.toString(sourcePrivilege);
 
         listener.get().onResponse(new GetResponse(
-            new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true, new BytesArray(docSource), emptyMap())
+            new GetResult(request.index(), request.type(), request.id(), 0, 1, 1L, true,
+                new BytesArray(docSource), emptyMap(), emptyMap())
         ));
 
         final ApplicationPrivilegeDescriptor getPrivilege = future.get(1, TimeUnit.SECONDS);
         assertThat(getPrivilege, equalTo(sourcePrivilege));
@@ -149,7 +150,8 @@ public void testGetMissingPrivilege() throws Exception {
         assertThat(request.id(), equalTo("application-privilege_myapp:admin"));
 
         listener.get().onResponse(new GetResponse(
-            new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, false, null, emptyMap())
+            new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1,
+                false, null, emptyMap(), emptyMap())
         ));
 
         final ApplicationPrivilegeDescriptor getPrivilege = future.get(1, TimeUnit.SECONDS);
         assertThat(getPrivilege, Matchers.nullValue());
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java
index 3476a3d7c00a3..20108b0114933 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java
@@ -65,7 +65,8 @@ public static SecurityIndexManager mockSecurityIndexManager(boolean exists, bool
     }
 
     public static void mockGetRequest(Client client, String documentId, BytesReference source) {
-        GetResult result = new GetResult(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, documentId, 0, 1, 1, true, source, emptyMap());
+        GetResult result = new GetResult(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, documentId, 0, 1, 1, true, source,
+            emptyMap(), emptyMap());
         mockGetRequest(client, documentId, result);
     }
 
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java
index 80cb657a5762e..fd06045204710 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java
@@ -1217,7 +1217,8 @@ private void mockGetWatchResponse(Client client, String id, GetResponse response
                 listener.onResponse(response);
             } else {
                 GetResult notFoundResult =
-                    new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, false, null, null);
+                    new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0,
+                        -1, false, null, null, null);
                 listener.onResponse(new GetResponse(notFoundResult));
             }
             return null;
@@ -1232,7 +1233,8 @@ private void mockGetWatchException(Client client, String id, Exception e) {
                 listener.onFailure(e);
             } else {
                 GetResult notFoundResult =
-                    new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1, false, null, null);
+                    new GetResult(request.index(), request.type(), request.id(), UNASSIGNED_SEQ_NO, 0, -1,
+                        false, null, null, null);
                 listener.onResponse(new GetResponse(notFoundResult));
             }
             return null;
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java
index 726a46799d401..0f7d64527fe26 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java
@@ -71,7 +71,7 @@ public void testWatchNotFound() {
         doAnswer(invocation -> {
             ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocation.getArguments()[1];
             listener.onResponse(new GetResponse(new GetResult(Watch.INDEX, MapperService.SINGLE_MAPPING_NAME, watchId, UNASSIGNED_SEQ_NO,
-                0, -1, false, BytesArray.EMPTY, Collections.emptyMap())));
+                0, -1, false, BytesArray.EMPTY, Collections.emptyMap(), Collections.emptyMap())));
             return null;
         }).when(client).get(anyObject(), anyObject());

From 81f3b5d4c288cee50d4484d77c3259a84b7a9562 Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Thu, 23 May 2019 20:18:34 +0200
Subject: [PATCH 233/321] Improve Close Index Response (#39687)

This changes the `CloseIndexResponse` so that it reports the closing
result for each index. Shard failures or exceptions are also reported
per index, and the global acknowledgment flag is computed from the
index results only.

The response looks like:
```
{
  "acknowledged" : true,
  "shards_acknowledged" : true,
  "indices" : {
    "docs" : {
      "closed" : true
    }
  }
}
```

The response reports shard failures like:
```
{
  "acknowledged" : false,
  "shards_acknowledged" : false,
  "indices" : {
    "docs-1" : {
      "closed" : true
    },
    "docs-2" : {
      "closed" : false,
      "shards" : {
        "1" : {
          "failures" : [
            {
              "shard" : 1,
              "index" : "docs-2",
              "status" : "BAD_REQUEST",
              "reason" : {
                "type" : "index_closed_exception",
                "reason" : "closed",
                "index_uuid" : "JFmQwr_aSPiZbkAH_KEF7A",
                "index" : "docs-2"
              }
            }
          ]
        }
      }
    },
    "docs-3" : {
      "closed" : true
    }
  }
}
```

Co-authored-by: Tanguy Leroux
---
 .../test/indices.open/10_basic.yml            |  37 +++
 .../indices/close/CloseIndexResponse.java     | 247 +++++++++++++++++-
 .../close/TransportCloseIndexAction.java      |   4 +-
 .../metadata/MetaDataIndexStateService.java   |  97 +++----
 .../close/CloseIndexResponseTests.java        | 139 +++++++++-
 .../MetaDataIndexStateServiceTests.java       |  62 ++++-
 .../MetaDataIndexStateServiceUtils.java       |   6 +-
 .../indices/cluster/ClusterStateChanges.java  |   6 +-
 .../indices/state/CloseIndexIT.java           |  52 +++-
 .../CloseFollowerIndexStepTests.java          |   2 +-
 10 files changed, 578 insertions(+), 74 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml
index a8ab29d9feb97..8bc8ce6c4c871 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml
@@ -79,3 +79,40 @@
   - is_true: acknowledged
   - match: { acknowledged: true }
   - match: { shards_acknowledged: true }
+---
+"Close index response with result per index":
+  - skip:
+      version: " - 7.99.99"
+      reason: "close index response reports result per index starting version 8.0.0"
+
+  - do:
+      indices.create:
+        index: index_1
+        body:
+          settings:
+            number_of_replicas: 0
+
+  - do:
+      indices.create:
+        index: index_2
+        body:
+          settings:
+            number_of_replicas: 0
+
+  - do:
+      indices.create:
+        index: index_3
+        body:
+          settings:
+            number_of_replicas: 0
+
+  - do:
+      indices.close:
+        index: "index_*"
+
+  - match: { acknowledged: true }
+  - match: { shards_acknowledged: true }
+  - match: { indices.index_1.closed: true }
+  - match: { indices.index_2.closed: true }
+  - match: { indices.index_3.closed: true }
+
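To see how a caller might consume the per-index results that the YAML test above asserts on, here is a minimal sketch against the classes this patch adds below (only the reporting logic is mine; `getIndices`, `hasFailures` and `getIndex` come from the patch):

```
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult;

class CloseResultReporter {

    /** Prints one line per index, flagging indices whose close attempt failed. */
    static void report(CloseIndexResponse response) {
        for (IndexResult result : response.getIndices()) {
            if (result.hasFailures()) {
                System.out.println(result.getIndex().getName() + " failed to close: " + result);
            } else {
                System.out.println(result.getIndex().getName() + " closed");
            }
        }
    }
}
```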
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java
index ea44ba7a8e46b..ea7d14655c594 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java
@@ -18,20 +18,40 @@
  */
 package org.elasticsearch.action.admin.indices.close;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.Index;
 
 import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.unmodifiableList;
 
 public class CloseIndexResponse extends ShardsAcknowledgedResponse {
 
+    private List<IndexResult> indices;
+
     CloseIndexResponse() {
     }
 
-    public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged) {
+    public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List<IndexResult> indices) {
         super(acknowledged, shardsAcknowledged);
+        this.indices = unmodifiableList(Objects.requireNonNull(indices));
+    }
+
+    public List<IndexResult> getIndices() {
+        return indices;
     }
 
     @Override
@@ -40,6 +60,11 @@ public void readFrom(StreamInput in) throws IOException {
         if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             readShardsAcknowledged(in);
         }
+        if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
+            indices = unmodifiableList(in.readList(IndexResult::new));
+        } else {
+            indices = unmodifiableList(emptyList());
+        }
     }
 
     @Override
@@ -48,5 +73,225 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             writeShardsAcknowledged(out);
         }
+        if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+            out.writeList(indices);
+        }
+    }
+
+    protected void addCustomFields(final XContentBuilder builder, final Params params) throws IOException {
+        super.addCustomFields(builder, params);
+        builder.startObject("indices");
+        for (IndexResult index : indices) {
+            index.toXContent(builder, params);
+        }
+        builder.endObject();
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+
+    public static class IndexResult implements Writeable, ToXContentFragment {
+
+        private final Index index;
+        private final @Nullable Exception exception;
+        private final @Nullable ShardResult[] shards;
+
+        public IndexResult(final Index index) {
+            this(index, null, null);
+        }
+
+        public IndexResult(final Index index, final Exception failure) {
+            this(index, Objects.requireNonNull(failure), null);
+        }
+
+        public IndexResult(final Index index, final ShardResult[] shards) {
+            this(index, null, Objects.requireNonNull(shards));
+        }
+
+        private IndexResult(final Index index, @Nullable final Exception exception, @Nullable final ShardResult[] shards) {
+            this.index = Objects.requireNonNull(index);
+            this.exception = exception;
+            this.shards = shards;
+        }
+
+        IndexResult(final StreamInput in) throws IOException {
+            this.index = new Index(in);
+            this.exception = in.readException();
+            this.shards = in.readOptionalArray(ShardResult::new, ShardResult[]::new);
+        }
+        @Override
+        public void writeTo(final StreamOutput out) throws IOException {
+            index.writeTo(out);
+            out.writeException(exception);
+            out.writeOptionalArray(shards);
+        }
+
+        public Index getIndex() {
+            return index;
+        }
+
+        public Exception getException() {
+            return exception;
+        }
+
+        public ShardResult[] getShards() {
+            return shards;
+        }
+
+        public boolean hasFailures() {
+            if (exception != null) {
+                return true;
+            }
+            if (shards != null) {
+                for (ShardResult shard : shards) {
+                    if (shard.hasFailures()) {
+                        return true;
+                    }
+                }
+            }
+            return false;
+        }
+
+        @Override
+        public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+            builder.startObject(index.getName());
+            {
+                if (hasFailures()) {
+                    builder.field("closed", false);
+                    if (exception != null) {
+                        builder.startObject("exception");
+                        ElasticsearchException.generateFailureXContent(builder, params, exception, true);
+                        builder.endObject();
+                    } else {
+                        builder.startObject("failedShards");
+                        for (ShardResult shard : shards) {
+                            if (shard.hasFailures()) {
+                                shard.toXContent(builder, params);
+                            }
+                        }
+                        builder.endObject();
+                    }
+                } else {
+                    builder.field("closed", true);
+                }
+            }
+            return builder.endObject();
+        }
+
+        @Override
+        public String toString() {
+            return Strings.toString(this);
+        }
+    }
+
+    public static class ShardResult implements Writeable, ToXContentFragment {
+
+        private final int id;
+        private final ShardResult.Failure[] failures;
+
+        public ShardResult(final int id, final Failure[] failures) {
+            this.id = id;
+            this.failures = failures;
+        }
+
+        ShardResult(final StreamInput in) throws IOException {
+            this.id = in.readVInt();
+            this.failures = in.readOptionalArray(Failure::readFailure, ShardResult.Failure[]::new);
+        }
+
+        @Override
+        public void writeTo(final StreamOutput out) throws IOException {
+            out.writeVInt(id);
+            out.writeOptionalArray(failures);
+        }
+
+        public boolean hasFailures() {
+            return failures != null && failures.length > 0;
+        }
+
+        public int getId() {
+            return id;
+        }
+
+        public Failure[] getFailures() {
+            return failures;
+        }
+
+        @Override
+        public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+            builder.startObject(String.valueOf(id));
+            {
+                builder.startArray("failures");
+                if (failures != null) {
+                    for (Failure failure : failures) {
+                        builder.startObject();
+                        failure.toXContent(builder, params);
+                        builder.endObject();
+                    }
+                }
+                builder.endArray();
+            }
+            return builder.endObject();
+        }
+
+        @Override
+        public String toString() {
+            return Strings.toString(this);
+        }
+
+        public static class Failure extends DefaultShardOperationFailedException implements Writeable {
+
+            private @Nullable String nodeId;
+
+            private Failure() {
+            }
+
+            public Failure(final String index, final int shardId, final Throwable reason) {
+                this(index, shardId, reason, null);
+            }
+
+            public Failure(final String index, final int shardId, final Throwable reason, final String nodeId) {
+                super(index, shardId, reason);
+                this.nodeId = nodeId;
+            }
+
+            public String getNodeId() {
+                return nodeId;
+            }
+
+            @Override
+            public void readFrom(final StreamInput in) throws IOException {
+                super.readFrom(in);
+                nodeId = in.readOptionalString();
+            }
+
+            @Override
+            public void writeTo(final StreamOutput out) throws IOException {
+                super.writeTo(out);
+                out.writeOptionalString(nodeId);
+            }
+
+            @Override
+            public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+                if (nodeId != null) {
+                    builder.field("node", nodeId);
+                }
+                return super.toXContent(builder, params);
+            }
+
+            @Override
+            public String toString() {
+                return Strings.toString(this);
+            }
+
+            static Failure readFailure(final StreamInput in) throws IOException {
+                final Failure failure = new Failure();
+                failure.readFrom(in);
+                return failure;
+            }
+        }
+    }
 }
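Since `IndexResult` and `ShardResult` are `Writeable`, they round-trip through the usual stream pair; the serialization tests later in this patch do exactly that for the whole response. A sketch of the idiom (note the `StreamInput` constructor above is package-private, so this only compiles from the same package):

```
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

class IndexResultRoundTrip {

    /** Serializes an IndexResult and reads it back, as the BWC tests below do for the response. */
    static IndexResult roundTrip(IndexResult original) throws Exception {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return new IndexResult(in);
            }
        }
    }
}
```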
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
index a6f4b6f3d0c4a..3c231d13845b2 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
@@ -40,6 +40,8 @@
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.Collections;
+
 /**
  * Close index action
  */
@@ -109,7 +111,7 @@ protected void masterOperation(final Task task, final ActionListener<CloseIndexResponse> listener) throws Exception {
         final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
         if (concreteIndices == null || concreteIndices.length == 0) {
-            listener.onResponse(new CloseIndexResponse(true, false));
+            listener.onResponse(new CloseIndexResponse(true, false, Collections.emptyList()));
             return;
         }
 
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index 80be71dadd3d6..ef4583e98e544 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -29,10 +29,11 @@
 import org.elasticsearch.action.NotifyOnceListener;
 import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
 import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.ShardResult;
 import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction;
 import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
 import org.elasticsearch.action.support.ActiveShardsObserver;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterState;
@@ -52,6 +53,7 @@
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -72,6 +74,8 @@
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -144,27 +148,22 @@ public ClusterState execute(final ClusterState currentState) {
             public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) {
                 if (oldState == newState) {
                     assert blockedIndices.isEmpty() : "List of blocked indices is not empty but cluster state wasn't changed";
-                    listener.onResponse(new CloseIndexResponse(true, false));
+                    listener.onResponse(new CloseIndexResponse(true, false, Collections.emptyList()));
                 } else {
                     assert blockedIndices.isEmpty() == false : "List of blocked indices is empty but cluster state was changed";
                     threadPool.executor(ThreadPool.Names.MANAGEMENT)
                         .execute(new WaitForClosedBlocksApplied(blockedIndices, request,
-                            ActionListener.wrap(results ->
+                            ActionListener.wrap(verifyResults ->
                                 clusterService.submitStateUpdateTask("close-indices", new ClusterStateUpdateTask(Priority.URGENT) {
-
-                                    boolean acknowledged = true;
+                                    private final List<IndexResult> indices = new ArrayList<>();
 
                                     @Override
                                     public ClusterState execute(final ClusterState currentState) throws Exception {
-                                        final ClusterState updatedState = closeRoutingTable(currentState, blockedIndices, results);
-                                        for (Map.Entry<Index, AcknowledgedResponse> result : results.entrySet()) {
-                                            IndexMetaData updatedMetaData = updatedState.metaData().index(result.getKey());
-                                            if (updatedMetaData != null && updatedMetaData.getState() != IndexMetaData.State.CLOSE) {
-                                                acknowledged = false;
-                                                break;
-                                            }
-                                        }
-                                        return allocationService.reroute(updatedState, "indices closed");
+                                        Tuple<ClusterState, Collection<IndexResult>> closingResult =
+                                            closeRoutingTable(currentState, blockedIndices, verifyResults);
+                                        assert verifyResults.size() == closingResult.v2().size();
+                                        indices.addAll(closingResult.v2());
+                                        return allocationService.reroute(closingResult.v1(), "indices closed");
                                     }
 
                                     @Override
@@ -176,27 +175,28 @@ public void onFailure(final String source, final Exception e) {
                                     public void clusterStateProcessed(final String source,
                                                                       final ClusterState oldState, final ClusterState newState) {
 
-                                        final String[] indices = results.entrySet().stream()
-                                            .filter(result -> result.getValue().isAcknowledged())
-                                            .map(result -> result.getKey().getName())
-                                            .filter(index -> newState.routingTable().hasIndex(index))
+                                        final boolean acknowledged = indices.stream().noneMatch(IndexResult::hasFailures);
+                                        final String[] waitForIndices = indices.stream()
+                                            .filter(result -> result.hasFailures() == false)
+                                            .filter(result -> newState.routingTable().hasIndex(result.getIndex()))
+                                            .map(result -> result.getIndex().getName())
                                             .toArray(String[]::new);
-                                        if (indices.length > 0) {
-                                            activeShardsObserver.waitForActiveShards(indices, request.waitForActiveShards(),
+
+                                        if (waitForIndices.length > 0) {
+                                            activeShardsObserver.waitForActiveShards(waitForIndices, request.waitForActiveShards(),
                                                 request.ackTimeout(), shardsAcknowledged -> {
                                                     if (shardsAcknowledged == false) {
                                                         logger.debug("[{}] indices closed, but the operation timed out while waiting " +
-                                                            "for enough shards to be started.", Arrays.toString(indices));
+                                                            "for enough shards to be started.", Arrays.toString(waitForIndices));
                                                     }
                                                     // acknowledged maybe be false but some indices may have been correctly closed, so
                                                     // we maintain a kind of coherency by overriding the shardsAcknowledged value
                                                     // (see ShardsAcknowledgedResponse constructor)
                                                     boolean shardsAcked = acknowledged ? shardsAcknowledged : false;
-                                                    listener.onResponse(new CloseIndexResponse(acknowledged, shardsAcked));
+                                                    listener.onResponse(new CloseIndexResponse(acknowledged, shardsAcked, indices));
                                                 }, listener::onFailure);
                                         } else {
-                                            listener.onResponse(new CloseIndexResponse(acknowledged, false));
+                                            listener.onResponse(new CloseIndexResponse(acknowledged, false, indices));
                                         }
                                     }
                                 }),
@@ -292,11 +292,11 @@ class WaitForClosedBlocksApplied extends AbstractRunnable {
 
         private final Map<Index, ClusterBlock> blockedIndices;
         private final CloseIndexClusterStateUpdateRequest request;
-        private final ActionListener<Map<Index, AcknowledgedResponse>> listener;
+        private final ActionListener<Map<Index, IndexResult>> listener;
 
         private WaitForClosedBlocksApplied(final Map<Index, ClusterBlock> blockedIndices,
                                            final CloseIndexClusterStateUpdateRequest request,
-                                           final ActionListener<Map<Index, AcknowledgedResponse>> listener) {
+                                           final ActionListener<Map<Index, IndexResult>> listener) {
             if (blockedIndices == null || blockedIndices.isEmpty()) {
                 throw new IllegalArgumentException("Cannot wait for closed blocks to be applied, list of blocked indices is empty or null");
             }
@@ -312,7 +312,7 @@ public void onFailure(final Exception e) {
 
         @Override
         protected void doRun() throws Exception {
-            final Map<Index, AcknowledgedResponse> results = ConcurrentCollections.newConcurrentMap();
+            final Map<Index, IndexResult> results = ConcurrentCollections.newConcurrentMap();
             final CountDown countDown = new CountDown(blockedIndices.size());
             final ClusterState state = clusterService.state();
             blockedIndices.forEach((index, block) -> {
@@ -325,47 +325,51 @@ protected void doRun() throws Exception {
             });
         }
 
-        private void waitForShardsReadyForClosing(final Index index, final ClusterBlock closingBlock,
-                                                  final ClusterState state, final Consumer<AcknowledgedResponse> onResponse) {
+        private void waitForShardsReadyForClosing(final Index index,
+                                                  final ClusterBlock closingBlock,
+                                                  final ClusterState state,
+                                                  final Consumer<IndexResult> onResponse) {
             final IndexMetaData indexMetaData = state.metaData().index(index);
             if (indexMetaData == null) {
                 logger.debug("index {} has been blocked before closing and is now deleted, ignoring", index);
-                onResponse.accept(new AcknowledgedResponse(true));
+                onResponse.accept(new IndexResult(index));
                 return;
             }
             final IndexRoutingTable indexRoutingTable = state.routingTable().index(index);
             if (indexRoutingTable == null || indexMetaData.getState() == IndexMetaData.State.CLOSE) {
                 assert state.blocks().hasIndexBlock(index.getName(), INDEX_CLOSED_BLOCK);
                 logger.debug("index {} has been blocked before closing and is already closed, ignoring", index);
-                onResponse.accept(new AcknowledgedResponse(true));
+                onResponse.accept(new IndexResult(index));
                 return;
             }
 
             final ImmutableOpenIntMap<IndexShardRoutingTable> shards = indexRoutingTable.getShards();
-            final AtomicArray<AcknowledgedResponse> results = new AtomicArray<>(shards.size());
+            final AtomicArray<ShardResult> results = new AtomicArray<>(shards.size());
             final CountDown countDown = new CountDown(shards.size());
 
             for (IntObjectCursor<IndexShardRoutingTable> shard : shards) {
                 final IndexShardRoutingTable shardRoutingTable = shard.value;
-                final ShardId shardId = shardRoutingTable.shardId();
+                final int shardId = shardRoutingTable.shardId().id();
                 sendVerifyShardBeforeCloseRequest(shardRoutingTable, closingBlock, new NotifyOnceListener<ReplicationResponse>() {
                     @Override
                     public void innerOnResponse(final ReplicationResponse replicationResponse) {
-                        ReplicationResponse.ShardInfo shardInfo = replicationResponse.getShardInfo();
-                        results.setOnce(shardId.id(), new AcknowledgedResponse(shardInfo.getFailed() == 0));
+                        ShardResult.Failure[] failures = Arrays.stream(replicationResponse.getShardInfo().getFailures())
+                            .map(f -> new ShardResult.Failure(f.index(), f.shardId(), f.getCause(), f.nodeId()))
+                            .toArray(ShardResult.Failure[]::new);
+                        results.setOnce(shardId, new ShardResult(shardId, failures));
                         processIfFinished();
                     }
 
                     @Override
                     public void innerOnFailure(final Exception e) {
-                        results.setOnce(shardId.id(), new AcknowledgedResponse(false));
+                        ShardResult.Failure failure = new ShardResult.Failure(index.getName(), shardId, e);
+                        results.setOnce(shardId, new ShardResult(shardId, new ShardResult.Failure[]{failure}));
                         processIfFinished();
                     }
 
                     private void processIfFinished() {
                         if (countDown.countDown()) {
-                            final boolean acknowledged = results.asList().stream().allMatch(AcknowledgedResponse::isAcknowledged);
-                            onResponse.accept(new AcknowledgedResponse(acknowledged));
+                            onResponse.accept(new IndexResult(index, results.toArray(new ShardResult[results.length()])));
                         }
                     }
                 });
@@ -396,9 +400,9 @@ private void sendVerifyShardBeforeCloseRequest(final IndexShardRoutingTable shar
     /**
      * Step 3 - Move index states from OPEN to CLOSE in cluster state for indices that are ready for closing.
     */
-    static ClusterState closeRoutingTable(final ClusterState currentState,
-                                          final Map<Index, ClusterBlock> blockedIndices,
-                                          final Map<Index, AcknowledgedResponse> results) {
+    static Tuple<ClusterState, Collection<IndexResult>> closeRoutingTable(final ClusterState currentState,
+                                                                          final Map<Index, ClusterBlock> blockedIndices,
+                                                                          final Map<Index, IndexResult> verifyResult) {
 
         // Remove the index routing table of closed indices if the cluster is in a mixed version
         // that does not support the replication of closed indices
@@ -409,9 +413,10 @@ static ClusterState closeRoutingTable(final ClusterState currentState,
         final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
 
         final Set<String> closedIndices = new HashSet<>();
-        for (Map.Entry<Index, AcknowledgedResponse> result : results.entrySet()) {
+        Map<Index, IndexResult> closingResults = new HashMap<>(verifyResult);
+        for (Map.Entry<Index, IndexResult> result : verifyResult.entrySet()) {
             final Index index = result.getKey();
-            final boolean acknowledged = result.getValue().isAcknowledged();
+            final boolean acknowledged = result.getValue().hasFailures() == false;
             try {
                 if (acknowledged == false) {
                     logger.debug("verification of shards before closing {} failed", index);
@@ -424,7 +429,11 @@ static ClusterState closeRoutingTable(final ClusterState currentState,
                     continue;
                 }
                 final ClusterBlock closingBlock = blockedIndices.get(index);
+                assert closingBlock != null;
                 if (currentState.blocks().hasIndexBlock(index.getName(), closingBlock) == false) {
+                    // we should report an error in this case as the index can be left open.
+                    closingResults.put(result.getKey(), new IndexResult(result.getKey(), new IllegalStateException(
+                        "verification of shards before closing " + index + " succeeded but block has been removed in the meantime")));
                     logger.debug("verification of shards before closing {} succeeded but block has been removed in the meantime", index);
                     continue;
                 }
@@ -450,9 +459,9 @@ static ClusterState closeRoutingTable(final ClusterState currentState,
                 logger.debug("index {} has been deleted since it was blocked before closing, ignoring", index);
             }
         }
-
         logger.info("completed closing of indices {}", closedIndices);
-        return ClusterState.builder(currentState).blocks(blocks).metaData(metadata).routingTable(routingTable.build()).build();
+        return Tuple.tuple(ClusterState.builder(currentState).blocks(blocks).metaData(metadata).routingTable(routingTable.build()).build(),
+            closingResults.values());
     }
 
     public void openIndex(final OpenIndexClusterStateUpdateRequest request,
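The shard verification above fans out one request per shard and must fire its callback exactly once, when the last shard reports. Distilled, the `CountDown`/`AtomicArray` idiom it uses looks like this (the wrapper class is mine, not the patch's):

```
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.CountDown;

import java.util.function.Consumer;

/** Collects one result per slot; the last completion triggers the callback exactly once. */
class FanOutCollector<T> {

    private final AtomicArray<T> results;
    private final CountDown countDown;
    private final Consumer<AtomicArray<T>> onCompletion;

    FanOutCollector(int size, Consumer<AtomicArray<T>> onCompletion) {
        this.results = new AtomicArray<>(size);
        this.countDown = new CountDown(size);
        this.onCompletion = onCompletion;
    }

    /** Safe to call from any thread; each slot may complete only once. */
    void complete(int slot, T result) {
        results.setOnce(slot, result);
        if (countDown.countDown()) {
            onCompletion.accept(results);
        }
    }
}
```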
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
index f86beff7738e3..40c34af51598d 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java
@@ -19,14 +19,30 @@
 
 package org.elasticsearch.action.admin.indices.close;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.ActionNotFoundTransportException;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.test.VersionUtils.getPreviousVersion;
 import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
 
 public class CloseIndexResponseTests extends ESTestCase {
 
@@ -47,11 +63,12 @@ public void testBwcSerialization() throws Exception {
         {
             final CloseIndexResponse response = randomResponse();
             try (BytesStreamOutput out = new BytesStreamOutput()) {
-                out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0));
+                out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, getPreviousVersion(Version.V_7_2_0)));
                 response.writeTo(out);
 
                 final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse();
                 try (StreamInput in = out.bytes().streamInput()) {
+                    in.setVersion(out.getVersion());
                     deserializedResponse.readFrom(in);
                 }
                 assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
@@ -64,22 +81,136 @@ public void testBwcSerialization() throws Exception {
 
                 final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
                 try (StreamInput in = out.bytes().streamInput()) {
-                    in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0));
+                    in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, getPreviousVersion(Version.V_7_2_0)));
                     deserializedResponse.readFrom(in);
                 }
                 assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
             }
         }
+        {
+            final CloseIndexResponse response = randomResponse();
+            try (BytesStreamOutput out = new BytesStreamOutput()) {
+                Version version = randomVersionBetween(random(), Version.V_7_2_0, Version.CURRENT);
+                out.setVersion(version);
+                response.writeTo(out);
+                final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
+                try (StreamInput in = out.bytes().streamInput()) {
+                    in.setVersion(version);
+                    deserializedResponse.readFrom(in);
+                }
+                assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
+                assertThat(deserializedResponse.isShardsAcknowledged(), equalTo(response.isShardsAcknowledged()));
+                if (version.onOrAfter(Version.V_8_0_0)) {
+                    assertThat(deserializedResponse.getIndices(), hasSize(response.getIndices().size()));
+                } else {
+                    assertThat(deserializedResponse.getIndices(), empty());
+                }
+            }
+        }
     }
 
     private CloseIndexResponse randomResponse() {
-        final boolean acknowledged = randomBoolean();
+        boolean acknowledged = true;
+        final String[] indicesNames = generateRandomStringArray(10, 10, false, true);
+
+        final List<CloseIndexResponse.IndexResult> indexResults = new ArrayList<>();
+        for (String indexName : indicesNames) {
+            final Index index = new Index(indexName, "_na_");
+            if (randomBoolean()) {
+                indexResults.add(new CloseIndexResponse.IndexResult(index));
+            } else {
+                if (randomBoolean()) {
+                    acknowledged = false;
+                    indexResults.add(new CloseIndexResponse.IndexResult(index, randomException(index, 0)));
+                } else {
+                    final int nbShards = randomIntBetween(1, 5);
+                    CloseIndexResponse.ShardResult[] shards = new CloseIndexResponse.ShardResult[nbShards];
+                    for (int i = 0; i < nbShards; i++) {
+                        CloseIndexResponse.ShardResult.Failure[] failures = null;
+                        if (randomBoolean()) {
+                            acknowledged = false;
+                            failures = new CloseIndexResponse.ShardResult.Failure[randomIntBetween(1, 3)];
+                            for (int j = 0; j < failures.length; j++) {
+                                String nodeId = randomAlphaOfLength(5);
+                                failures[j] = new CloseIndexResponse.ShardResult.Failure(indexName, i, randomException(index, i), nodeId);
+                            }
+                        }
+                        shards[i] = new CloseIndexResponse.ShardResult(i, failures);
+                    }
+                    indexResults.add(new CloseIndexResponse.IndexResult(index, shards));
+                }
+            }
+        }
 
         final boolean shardsAcknowledged = acknowledged ? randomBoolean() : false;
-        return new CloseIndexResponse(acknowledged, shardsAcknowledged);
+        return new CloseIndexResponse(acknowledged, shardsAcknowledged, indexResults);
+    }
+
+    private static ElasticsearchException randomException(final Index index, final int id) {
+        return randomFrom(
+            new IndexNotFoundException(index),
+            new ActionNotFoundTransportException("test"),
+            new NoShardAvailableActionException(new ShardId(index, id)));
     }
 
     private static void assertCloseIndexResponse(final CloseIndexResponse actual, final CloseIndexResponse expected) {
         assertThat(actual.isAcknowledged(), equalTo(expected.isAcknowledged()));
         assertThat(actual.isShardsAcknowledged(), equalTo(expected.isShardsAcknowledged()));
+
+        for (int i = 0; i < expected.getIndices().size(); i++) {
+            CloseIndexResponse.IndexResult expectedIndexResult = expected.getIndices().get(i);
+            CloseIndexResponse.IndexResult actualIndexResult = actual.getIndices().get(i);
+            assertThat(actualIndexResult.getIndex(), equalTo(expectedIndexResult.getIndex()));
+            assertThat(actualIndexResult.hasFailures(), equalTo(expectedIndexResult.hasFailures()));
+
+            if (expectedIndexResult.hasFailures() == false) {
+                assertThat(actualIndexResult.getException(), nullValue());
+                if (actualIndexResult.getShards() != null) {
+                    assertThat(Arrays.stream(actualIndexResult.getShards())
+                        .allMatch(shardResult -> shardResult.hasFailures() == false), is(true));
+                }
+            }
+
+            if (expectedIndexResult.getException() != null) {
+                assertThat(actualIndexResult.getShards(), nullValue());
+                assertThat(actualIndexResult.getException(), notNullValue());
+                assertThat(actualIndexResult.getException().getMessage(), equalTo(expectedIndexResult.getException().getMessage()));
+                assertThat(actualIndexResult.getException().getClass(), equalTo(expectedIndexResult.getException().getClass()));
+                assertArrayEquals(actualIndexResult.getException().getStackTrace(), expectedIndexResult.getException().getStackTrace());
+            } else {
+                assertThat(actualIndexResult.getException(), nullValue());
+            }
+
+            if (expectedIndexResult.getShards() != null) {
+                assertThat(actualIndexResult.getShards().length, equalTo(expectedIndexResult.getShards().length));
+
+                for (int j = 0; j < expectedIndexResult.getShards().length; j++) {
+                    CloseIndexResponse.ShardResult expectedShardResult = expectedIndexResult.getShards()[j];
+                    CloseIndexResponse.ShardResult actualShardResult = actualIndexResult.getShards()[j];
+                    assertThat(actualShardResult.getId(), equalTo(expectedShardResult.getId()));
+                    assertThat(actualShardResult.hasFailures(), equalTo(expectedShardResult.hasFailures()));
+
+                    if (expectedShardResult.hasFailures()) {
+                        assertThat(actualShardResult.getFailures().length, equalTo(expectedShardResult.getFailures().length));
+
+                        for (int k = 0; k < expectedShardResult.getFailures().length; k++) {
+                            CloseIndexResponse.ShardResult.Failure expectedFailure = expectedShardResult.getFailures()[k];
+                            CloseIndexResponse.ShardResult.Failure actualFailure = actualShardResult.getFailures()[k];
+                            assertThat(actualFailure.getNodeId(), equalTo(expectedFailure.getNodeId()));
+                            assertThat(actualFailure.index(), equalTo(expectedFailure.index()));
+                            assertThat(actualFailure.shardId(), equalTo(expectedFailure.shardId()));
+                            assertThat(actualFailure.getCause().getMessage(), equalTo(expectedFailure.getCause().getMessage()));
+                            assertThat(actualFailure.getCause().getClass(), equalTo(expectedFailure.getCause().getClass()));
+                            assertArrayEquals(actualFailure.getCause().getStackTrace(), expectedFailure.getCause().getStackTrace());
+                        }
} else { + assertThat(actualShardResult.getFailures(), nullValue()); + } + } + } else { + assertThat(actualIndexResult.getShards(), nullValue()); + } + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java index 36bca0be1c2d5..b655a98379553 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceTests.java @@ -20,7 +20,8 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.Version; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -50,6 +51,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -69,6 +71,7 @@ import static org.elasticsearch.cluster.shards.ClusterShardLimitIT.ShardCounts.forDataNodeCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -80,7 +83,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase { public void testCloseRoutingTable() { final Set nonBlockedIndices = new HashSet<>(); final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); + final Map results = new HashMap<>(); ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTable")).build(); for (int i = 0; i < randomIntBetween(1, 25); i++) { @@ -92,12 +95,17 @@ public void testCloseRoutingTable() { } else { final ClusterBlock closingBlock = MetaDataIndexStateService.createIndexClosingBlock(); state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - blockedIndices.put(state.metaData().index(indexName).getIndex(), closingBlock); - results.put(state.metaData().index(indexName).getIndex(), new AcknowledgedResponse(randomBoolean())); + final Index index = state.metaData().index(indexName).getIndex(); + blockedIndices.put(index, closingBlock); + if (randomBoolean()) { + results.put(index, new CloseIndexResponse.IndexResult(index)); + } else { + results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); + } } } - final ClusterState updatedState = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results); + final ClusterState updatedState = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); assertThat(updatedState.metaData().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); for (Index nonBlockedIndex : nonBlockedIndices) { @@ -105,7 +113,7 @@ public void testCloseRoutingTable() { assertThat(updatedState.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); } for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).isAcknowledged()) { + if 
(results.get(blockedIndex).hasFailures() == false) { assertIsClosed(blockedIndex.getName(), updatedState); } else { assertIsOpened(blockedIndex.getName(), updatedState); @@ -117,7 +125,7 @@ public void testCloseRoutingTable() { public void testCloseRoutingTableRemovesRoutingTable() { final Set nonBlockedIndices = new HashSet<>(); final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); + final Map results = new HashMap<>(); final ClusterBlock closingBlock = MetaDataIndexStateService.createIndexClosingBlock(); ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build(); @@ -129,8 +137,13 @@ public void testCloseRoutingTableRemovesRoutingTable() { nonBlockedIndices.add(state.metaData().index(indexName).getIndex()); } else { state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - blockedIndices.put(state.metaData().index(indexName).getIndex(), closingBlock); - results.put(state.metaData().index(indexName).getIndex(), new AcknowledgedResponse(randomBoolean())); + final Index index = state.metaData().index(indexName).getIndex(); + blockedIndices.put(index, closingBlock); + if (randomBoolean()) { + results.put(index, new CloseIndexResponse.IndexResult(index)); + } else { + results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); + } } } @@ -142,7 +155,7 @@ public void testCloseRoutingTableRemovesRoutingTable() { new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_2_0))) .build(); - state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results); + state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); assertThat(state.metaData().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); for (Index nonBlockedIndex : nonBlockedIndices) { @@ -150,7 +163,7 @@ public void testCloseRoutingTableRemovesRoutingTable() { assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); } for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).isAcknowledged()) { + if (results.get(blockedIndex).hasFailures() == false) { IndexMetaData indexMetaData = state.metaData().index(blockedIndex); assertThat(indexMetaData.getState(), is(IndexMetaData.State.CLOSE)); Settings indexSettings = indexMetaData.getSettings(); @@ -329,6 +342,33 @@ public void testIsIndexVerifiedBeforeClosed() { } } + public void testCloseFailedIfBlockDisappeared() { + ClusterState state = ClusterState.builder(new ClusterName("failedIfBlockDisappeared")).build(); + Map blockedIndices = new HashMap<>(); + int numIndices = between(1, 10); + Set disappearedIndices = new HashSet<>(); + Map verifyResults = new HashMap<>(); + for (int i = 0; i < numIndices; i++) { + String indexName = "test-" + i; + state = addOpenedIndex(indexName, randomIntBetween(1, 3), randomIntBetween(0, 3), state); + Index index = state.metaData().index(indexName).getIndex(); + state = MetaDataIndexStateService.addIndexClosedBlocks(new Index[]{index}, blockedIndices, state); + if (randomBoolean()) { + state = ClusterState.builder(state) + .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeIndexBlocks(indexName).build()) + .build(); + disappearedIndices.add(index); + } + verifyResults.put(index, new IndexResult(index)); + } + Collection closingResults = + MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, 
unmodifiableMap(verifyResults)).v2(); + assertThat(closingResults, hasSize(numIndices)); + Set failedIndices = closingResults.stream().filter(IndexResult::hasFailures) + .map(IndexResult::getIndex).collect(Collectors.toSet()); + assertThat(failedIndices, equalTo(disappearedIndices)); + } + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int openIndexShards, int openIndexReplicas, int closedIndexShards, int closedIndexReplicas, Settings clusterSettings) { ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java index 5ee6a7c60da3d..7c94a42bd0cb5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateServiceUtils.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.index.Index; @@ -43,7 +43,7 @@ public static ClusterState addIndexClosedBlocks(final Index[] indices, final Map */ public static ClusterState closeRoutingTable(final ClusterState state, final Map blockedIndices, - final Map results) { - return MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results); + final Map results) { + return MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index a8c47f5d3ef39..433662f95d4e0 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -40,7 +41,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; @@ -227,8 +227,8 @@ public ClusterState closeIndices(ClusterState state, CloseIndexRequest request) final Map blockedIndices = new HashMap<>(); ClusterState newState = MetaDataIndexStateServiceUtils.addIndexClosedBlocks(concreteIndices, blockedIndices, state); - newState = MetaDataIndexStateServiceUtils.closeRoutingTable(newState, blockedIndices, 
blockedIndices.keySet().stream() - .collect(Collectors.toMap(Function.identity(), index -> new AcknowledgedResponse(true)))); + newState = MetaDataIndexStateServiceUtils.closeRoutingTable(newState, blockedIndices, + blockedIndices.keySet().stream().collect(Collectors.toMap(Function.identity(), CloseIndexResponse.IndexResult::new))); return allocationService.reroute(newState, "indices closed"); } diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 6f666483b18d0..b39a008de5f4f 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Client; @@ -45,6 +46,7 @@ import org.elasticsearch.test.InternalTestCluster; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; @@ -64,6 +66,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class CloseIndexIT extends ESIntegTestCase { @@ -115,7 +118,7 @@ public void testCloseIndex() throws Exception { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, nbDocs) .mapToObj(i -> client().prepareIndex(indexName, "_doc", String.valueOf(i)).setSource("num", i)).collect(toList())); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName))); + assertBusy(() -> closeIndices(indexName)); assertIndexIsClosed(indexName); assertAcked(client().admin().indices().prepareOpen(indexName)); @@ -130,13 +133,17 @@ public void testCloseAlreadyClosedIndex() throws Exception { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, randomIntBetween(1, 10)) .mapToObj(i -> client().prepareIndex(indexName, "_doc", String.valueOf(i)).setSource("num", i)).collect(toList())); } - // First close should be acked - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName))); + // First close should be fully acked + assertBusy(() -> closeIndices(indexName)); assertIndexIsClosed(indexName); // Second close should be acked too final ActiveShardCount activeShardCount = randomFrom(ActiveShardCount.NONE, ActiveShardCount.DEFAULT, ActiveShardCount.ALL); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(activeShardCount))); + assertBusy(() -> { + CloseIndexResponse response = client().admin().indices().prepareClose(indexName).setWaitForActiveShards(activeShardCount).get(); + assertAcked(response); + assertTrue(response.getIndices().isEmpty()); + }); assertIndexIsClosed(indexName); } @@ -150,7 +157,7 @@ public void testCloseUnassignedIndex() throws Exception { assertThat(clusterState.metaData().indices().get(indexName).getState(), is(IndexMetaData.State.OPEN)); assertThat(clusterState.routingTable().allShards().stream().allMatch(ShardRouting::unassigned), is(true)); - assertBusy(() -> 
assertAcked(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(ActiveShardCount.NONE))); + assertBusy(() -> closeIndices(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(ActiveShardCount.NONE))); assertIndexIsClosed(indexName); } @@ -198,7 +205,7 @@ public void testCloseWhileIndexingDocuments() throws Exception { indexer.setAssertNoFailuresOnStop(false); waitForDocs(randomIntBetween(10, 50), indexer); - assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName))); + assertBusy(() -> closeIndices(indexName)); indexer.stop(); nbDocs += indexer.totalIndexedDocs(); @@ -345,6 +352,9 @@ public void testCloseIndexWaitForActiveShards() throws Exception { assertThat(client().admin().cluster().prepareHealth(indexName).get().getStatus(), is(ClusterHealthStatus.GREEN)); assertTrue(closeIndexResponse.isAcknowledged()); assertTrue(closeIndexResponse.isShardsAcknowledged()); + assertThat(closeIndexResponse.getIndices().get(0), notNullValue()); + assertThat(closeIndexResponse.getIndices().get(0).hasFailures(), is(false)); + assertThat(closeIndexResponse.getIndices().get(0).getIndex().getName(), equalTo(indexName)); assertIndexIsClosed(indexName); } @@ -448,6 +458,36 @@ public void testResyncPropagatePrimaryTerm() throws Exception { } } + private static void closeIndices(final String... indices) { + closeIndices(client().admin().indices().prepareClose(indices)); + } + + private static void closeIndices(final CloseIndexRequestBuilder requestBuilder) { + final CloseIndexResponse response = requestBuilder.get(); + assertThat(response.isAcknowledged(), is(true)); + assertThat(response.isShardsAcknowledged(), is(true)); + + final String[] indices = requestBuilder.request().indices(); + if (indices != null) { + assertThat(response.getIndices().size(), equalTo(indices.length)); + for (String index : indices) { + CloseIndexResponse.IndexResult indexResult = response.getIndices().stream() + .filter(result -> index.equals(result.getIndex().getName())).findFirst().get(); + assertThat(indexResult, notNullValue()); + assertThat(indexResult.hasFailures(), is(false)); + assertThat(indexResult.getException(), nullValue()); + assertThat(indexResult.getShards(), notNullValue()); + Arrays.stream(indexResult.getShards()).forEach(shardResult -> { + assertThat(shardResult.hasFailures(), is(false)); + assertThat(shardResult.getFailures(), notNullValue()); + assertThat(shardResult.getFailures().length, equalTo(0)); + }); + } + } else { + assertThat(response.getIndices().size(), equalTo(0)); + } + } + static void assertIndexIsClosed(final String... 
indices) { final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); for (String index : indices) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java index 368afaa26d0cc..4c00485e631e2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CloseFollowerIndexStepTests.java @@ -44,7 +44,7 @@ public void testCloseFollowingIndex() { assertThat(closeIndexRequest.indices()[0], equalTo("follower-index")); @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new CloseIndexResponse(true, true)); + listener.onResponse(new CloseIndexResponse(true, true, Collections.emptyList())); return null; }).when(indicesClient).close(Mockito.any(), Mockito.any()); From f6ae6c470a33aed74a4802d891260d442bd51ce2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Thu, 23 May 2019 21:56:13 +0200 Subject: [PATCH 234/321] Implement XContentParser.genericMap and XContentParser.genericMapOrdered methods (#42059) Implement XContentParser.genericMap and XContentParser.genericMapOrdered methods --- .../common/xcontent/XContentParser.java | 15 +++ .../common/xcontent/XContentSubParser.java | 9 ++ .../support/AbstractXContentParser.java | 87 +++++++---------- .../common/xcontent/SimpleStruct.java | 93 +++++++++++++++++++ .../common/xcontent/XContentParserTests.java | 54 +++++++++++ .../xcontent/WatcherXContentParser.java | 8 ++ 6 files changed, 215 insertions(+), 51 deletions(-) create mode 100644 libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 81cc39c5793cf..6d4da08bfaa59 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -19,11 +19,14 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.CheckedFunction; + import java.io.Closeable; import java.io.IOException; import java.nio.CharBuffer; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * Interface for pull - parsing {@link XContent} see {@link XContentType} for supported types. @@ -135,6 +138,18 @@ enum NumberType { Map mapStringsOrdered() throws IOException; + /** + * Returns an instance of {@link Map} holding parsed map. + * Serves as a replacement for the "map", "mapOrdered", "mapStrings" and "mapStringsOrdered" methods above. 
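+ * For example (a usage sketch only, assuming a value type with a suitable
+ * {@code fromXContent} method, such as the test-only {@code SimpleStruct}):
+ * {@code Map<String, SimpleStruct> m = parser.map(LinkedHashMap::new, SimpleStruct::fromXContent);}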
+ * + * @param mapFactory factory for creating new {@link Map} objects + * @param mapValueParser parser for parsing a single map value + * @param map value type + * @return {@link Map} object + */ + Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException; + List list() throws IOException; List listOrderedMap() throws IOException; diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index adcbf6ef1bee0..252bfea7ca9c0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -19,10 +19,13 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.CheckedFunction; + import java.io.IOException; import java.nio.CharBuffer; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * Wrapper for a XContentParser that makes a single object/array look like a complete document. @@ -110,6 +113,12 @@ public Map mapStringsOrdered() throws IOException { return parser.mapStringsOrdered(); } + @Override + public Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { + return parser.map(mapFactory, mapValueParser); + } + @Override public List list() throws IOException { return parser.list(); diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index fa6ffdd0407f9..68e03e34a1a17 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent.support; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParseException; @@ -34,6 +35,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; public abstract class AbstractXContentParser implements XContentParser { @@ -279,6 +281,12 @@ public Map mapStringsOrdered() throws IOException { return readOrderedMapStrings(this); } + @Override + public Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { + return readGenericMap(this, mapFactory, mapValueParser); + } + @Override public List list() throws IOException { return readList(this); @@ -289,21 +297,13 @@ public List listOrderedMap() throws IOException { return readListOrderedMap(this); } - interface MapFactory { - Map newMap(); - } - - interface MapStringsFactory { - Map newMap(); - } - - static final MapFactory SIMPLE_MAP_FACTORY = HashMap::new; + static final Supplier> SIMPLE_MAP_FACTORY = HashMap::new; - static final MapFactory ORDERED_MAP_FACTORY = LinkedHashMap::new; + static final Supplier> ORDERED_MAP_FACTORY = LinkedHashMap::new; - static final MapStringsFactory SIMPLE_MAP_STRINGS_FACTORY = HashMap::new; + static final Supplier> SIMPLE_MAP_STRINGS_FACTORY = HashMap::new; - static final MapStringsFactory ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new; + static final 
Supplier> ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new; static Map readMap(XContentParser parser) throws IOException { return readMap(parser, SIMPLE_MAP_FACTORY); @@ -329,28 +329,19 @@ static List readListOrderedMap(XContentParser parser) throws IOException return readList(parser, ORDERED_MAP_FACTORY); } - static Map readMap(XContentParser parser, MapFactory mapFactory) throws IOException { - Map map = mapFactory.newMap(); - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.START_OBJECT) { - token = parser.nextToken(); - } - for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) { - // Must point to field name - String fieldName = parser.currentName(); - // And then the value... - token = parser.nextToken(); - Object value = readValue(parser, mapFactory, token); - map.put(fieldName, value); - } - return map; + static Map readMap(XContentParser parser, Supplier> mapFactory) throws IOException { + return readGenericMap(parser, mapFactory, p -> readValue(p, mapFactory)); } - static Map readMapStrings(XContentParser parser, MapStringsFactory mapStringsFactory) throws IOException { - Map map = mapStringsFactory.newMap(); + static Map readMapStrings(XContentParser parser, Supplier> mapFactory) throws IOException { + return readGenericMap(parser, mapFactory, XContentParser::text); + } + + static Map readGenericMap( + XContentParser parser, + Supplier> mapFactory, + CheckedFunction mapValueParser) throws IOException { + Map map = mapFactory.get(); XContentParser.Token token = parser.currentToken(); if (token == null) { token = parser.nextToken(); @@ -363,13 +354,13 @@ static Map readMapStrings(XContentParser parser, MapStringsFacto String fieldName = parser.currentName(); // And then the value... 
parser.nextToken(); - String value = parser.text(); + T value = mapValueParser.apply(parser); map.put(fieldName, value); } return map; } - static List readList(XContentParser parser, MapFactory mapFactory) throws IOException { + static List readList(XContentParser parser, Supplier> mapFactory) throws IOException { XContentParser.Token token = parser.currentToken(); if (token == null) { token = parser.nextToken(); @@ -386,28 +377,22 @@ static List readList(XContentParser parser, MapFactory mapFactory) throw ArrayList list = new ArrayList<>(); for (; token != null && token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { - list.add(readValue(parser, mapFactory, token)); + list.add(readValue(parser, mapFactory)); } return list; } - static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException { - if (token == XContentParser.Token.VALUE_NULL) { - return null; - } else if (token == XContentParser.Token.VALUE_STRING) { - return parser.text(); - } else if (token == XContentParser.Token.VALUE_NUMBER) { - return parser.numberValue(); - } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - return parser.booleanValue(); - } else if (token == XContentParser.Token.START_OBJECT) { - return readMap(parser, mapFactory); - } else if (token == XContentParser.Token.START_ARRAY) { - return readList(parser, mapFactory); - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - return parser.binaryValue(); + static Object readValue(XContentParser parser, Supplier> mapFactory) throws IOException { + switch (parser.currentToken()) { + case VALUE_STRING: return parser.text(); + case VALUE_NUMBER: return parser.numberValue(); + case VALUE_BOOLEAN: return parser.booleanValue(); + case START_OBJECT: return readMap(parser, mapFactory); + case START_ARRAY: return readList(parser, mapFactory); + case VALUE_EMBEDDED_OBJECT: return parser.binaryValue(); + case VALUE_NULL: + default: return null; } - return null; } @Override diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java new file mode 100644 index 0000000000000..72bff3500be35 --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/SimpleStruct.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Simple structure with 3 fields: int, double and String. + * Used for testing parsers. 
+ */ +class SimpleStruct implements ToXContentObject { + + static SimpleStruct fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + private static final ParseField I = new ParseField("i"); + private static final ParseField D = new ParseField("d"); + private static final ParseField S = new ParseField("s"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "simple_struct", true, args -> new SimpleStruct((int) args[0], (double) args[1], (String) args[2])); + + static { + PARSER.declareInt(constructorArg(), I); + PARSER.declareDouble(constructorArg(), D); + PARSER.declareString(constructorArg(), S); + } + + private final int i; + private final double d; + private final String s; + + SimpleStruct(int i, double d, String s) { + this.i = i; + this.d = d; + this.s = s; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder + .startObject() + .field(I.getPreferredName(), i) + .field(D.getPreferredName(), d) + .field(S.getPreferredName(), s) + .endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SimpleStruct other = (SimpleStruct) o; + return i == other.i && d == other.d && Objects.equals(s, other.s); + } + + @Override + public int hashCode() { + return Objects.hash(i, d, s); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} + diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 606d019f3c4f7..c519880224ccb 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -30,18 +30,21 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.nullValue; +import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; public class XContentParserTests extends ESTestCase { @@ -329,6 +332,57 @@ public void testNestedMapInList() throws IOException { } } + public void testGenericMap() throws IOException { + String content = "{" + + "\"c\": { \"i\": 3, \"d\": 0.3, \"s\": \"ccc\" }, " + + "\"a\": { \"i\": 1, \"d\": 0.1, \"s\": \"aaa\" }, " + + "\"b\": { \"i\": 2, \"d\": 0.2, \"s\": \"bbb\" }" + + "}"; + SimpleStruct structA = new SimpleStruct(1, 0.1, "aaa"); + SimpleStruct structB = new SimpleStruct(2, 0.2, "bbb"); + SimpleStruct structC = new SimpleStruct(3, 0.3, "ccc"); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + Map actualMap = parser.map(HashMap::new, SimpleStruct::fromXContent); + // Verify map contents, ignore the iteration order. 
+ assertThat(actualMap, equalTo(Map.of("a", structA, "b", structB, "c", structC))); + assertThat(actualMap.values(), containsInAnyOrder(structA, structB, structC)); + assertNull(parser.nextToken()); + } + } + + public void testGenericMapOrdered() throws IOException { + String content = "{" + + "\"c\": { \"i\": 3, \"d\": 0.3, \"s\": \"ccc\" }, " + + "\"a\": { \"i\": 1, \"d\": 0.1, \"s\": \"aaa\" }, " + + "\"b\": { \"i\": 2, \"d\": 0.2, \"s\": \"bbb\" }" + + "}"; + SimpleStruct structA = new SimpleStruct(1, 0.1, "aaa"); + SimpleStruct structB = new SimpleStruct(2, 0.2, "bbb"); + SimpleStruct structC = new SimpleStruct(3, 0.3, "ccc"); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + Map actualMap = parser.map(LinkedHashMap::new, SimpleStruct::fromXContent); + // Verify map contents, ignore the iteration order. + assertThat(actualMap, equalTo(Map.of("a", structA, "b", structB, "c", structC))); + // Verify that map's iteration order is the same as the order in which fields appear in JSON. + assertThat(actualMap.values(), contains(structC, structA, structB)); + assertNull(parser.nextToken()); + } + } + + public void testGenericMap_Failure_MapContainingUnparsableValue() throws IOException { + String content = "{" + + "\"a\": { \"i\": 1, \"d\": 0.1, \"s\": \"aaa\" }, " + + "\"b\": { \"i\": 2, \"d\": 0.2, \"s\": 666 }, " + + "\"c\": { \"i\": 3, \"d\": 0.3, \"s\": \"ccc\" }" + + "}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + XContentParseException exception = expectThrows( + XContentParseException.class, + () -> parser.map(HashMap::new, SimpleStruct::fromXContent)); + assertThat(exception, hasMessage(containsString("s doesn't support values of type: VALUE_NUMBER"))); + } + } + public void testSubParserObject() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java index fcb3802ca6b76..1d155a5f0c02d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.watcher.support.xcontent; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -21,6 +22,7 @@ import java.time.ZonedDateTime; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * A xcontent parser that is used by watcher. 
This is a special parser that is @@ -123,6 +125,12 @@ public Map mapStringsOrdered() throws IOException { return parser.mapStringsOrdered(); } + @Override + public Map map( + Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { + return parser.map(mapFactory, mapValueParser); + } + @Override public List list() throws IOException { return parser.list(); From 677c391df05ecccb4bb065666775577fd2d4185f Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 23 May 2019 21:04:03 +0100 Subject: [PATCH 235/321] Avoid HashMap construction on Grok non-match (#42444) This change moves the construction of the result HashMap in Grok.captures() into the branch that actually needs it. This probably will not make a measurable difference for ingest pipelines, but it is beneficial to the ML find_file_structure endpoint, as it tries out many Grok patterns that will fail to match. --- libs/grok/src/main/java/org/elasticsearch/grok/Grok.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index c20737998feb9..473e8626a4c42 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -240,7 +240,6 @@ public boolean match(String text) { */ public Map captures(String text) { byte[] textAsBytes = text.getBytes(StandardCharsets.UTF_8); - Map fields = new HashMap<>(); Matcher matcher = compiledExpression.matcher(textAsBytes); int result; try { @@ -256,6 +255,7 @@ public Map captures(String text) { // TODO: I think we should throw an error here? return null; } else if (compiledExpression.numberOfNames() > 0) { + Map fields = new HashMap<>(); Region region = matcher.getEagerRegion(); for (Iterator entry = compiledExpression.namedBackrefIterator(); entry.hasNext();) { NameEntry e = entry.next(); @@ -270,8 +270,10 @@ public Map captures(String text) { } } } + return fields; + } else { + return Collections.emptyMap(); } - return fields; } public static Map getBuiltinPatterns() { From a15f1ee4f64b19670b15a9c1526c7b0b0204ea60 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 23 May 2019 21:06:47 +0100 Subject: [PATCH 236/321] [ML] Improve file structure finder timestamp format determination (#41948) This change contains a major refactoring of the timestamp format determination code used by the ML find file structure endpoint. Previously timestamp format determination was done separately for each piece of text supplied to the timestamp format finder. This had the drawback that it was not possible to distinguish dd/MM and MM/dd in the case where both numbers were 12 or less. In order to do this sensibly it is best to look across all the available timestamps and see if one of the numbers is greater than 12 in any of them. This necessitates making the timestamp format finder an instantiable class that can accumulate evidence over time. Another problem with the previous approach was that it was only possible to override the timestamp format to one of a limited set of timestamp formats. There was no way out if a file to be analysed had a timestamp that was sane yet not in the supported set. 
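To make the idea concrete, here is a minimal sketch (an illustration of the
approach only, not the code in this change) of how scanning every sample can
disambiguate day/month ordering:

    import java.util.List;

    class DayMonthOrderSketch {

        // Returns "dd/MM", "MM/dd", or null when every sample is ambiguous
        // (both numbers 12 or less).
        static String guessOrder(List<int[]> dayMonthPairs) {
            for (int[] pair : dayMonthPairs) {
                if (pair[0] > 12) {
                    return "dd/MM"; // first number cannot be a month
                }
                if (pair[1] > 12) {
                    return "MM/dd"; // second number cannot be a month
                }
            }
            return null; // undecidable from the samples alone
        }

        public static void main(String[] args) {
            // 05/03 on its own is ambiguous; the 22 in 22/11 proves day-first ordering
            System.out.println(guessOrder(List.of(new int[]{5, 3}, new int[]{22, 11})));
        }
    }

The real TimestampFormatFinder accumulates this kind of evidence as samples
are added, and (as described below) falls back to the server locale when no
observed sample is decisive.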
This is now changed to allow any timestamp format that can be parsed by a combination of these Java date/time formats: yy, yyyy, M, MM, MMM, MMMM, d, dd, EEE, EEEE, H, HH, h, mm, ss, a, XX, XXX, zzz Additionally S letter groups (fractional seconds) are supported providing they occur after ss and separated from the ss by a dot, comma or colon. Spacing and punctuation is also permitted with the exception of the question mark, newline and carriage return characters, together with literal text enclosed in single quotes. The full list of changes/improvements in this refactor is: - Make TimestampFormatFinder an instantiable class - Overrides must be specified in Java date/time format - Joda format is no longer accepted - Joda timestamp formats in outputs are now derived from the determined or overridden Java timestamp formats, not stored separately - Functionality for determining the "best" timestamp format in a set of lines has been moved from TextLogFileStructureFinder to TimestampFormatFinder, taking advantage of the fact that TimestampFormatFinder is now an instantiable class with state - The functionality to quickly rule out some possible Grok patterns when looking for timestamp formats has been changed from using simple regular expressions to the much faster approach of using the Shift-And method of sub-string search, but using an "alphabet" consisting of just 1 (representing any digit) and 0 (representing non-digits) - Timestamp format overrides are now much more flexible - Timestamp format overrides that do not correspond to a built-in Grok pattern are mapped to a %{CUSTOM_TIMESTAMP} Grok pattern whose definition is included within the date processor in the ingest pipeline - Grok patterns that correspond to multiple Java date/time patterns are now handled better - the Grok pattern is accepted as matching broadly, and the required set of Java date/time patterns is built up considering all observed samples - As a result of the more flexible acceptance of Grok patterns, when looking for the "best" timestamp in a set of lines timestamps are considered different if they are preceded by a different sequence of punctuation characters (to prevent timestamps far into some lines being considered similar to timestamps near the beginning of other lines) - Out-of-the-box Grok patterns that are considered now include %{DATE} and %{DATESTAMP}, which have indeterminate day/month ordering - The order of day/month in formats with indeterminate day/month order is determined by considering all observed samples (plus the server locale if the observed samples still do not suggest an ordering) Relates #38086 Closes #35137 Closes #35132 --- .../ml/apis/find-file-structure.asciidoc | 157 +- .../DelimitedFileStructureFinder.java | 23 +- .../FileStructureUtils.java | 117 +- .../GrokPatternCreator.java | 133 +- .../NdJsonFileStructureFinder.java | 11 +- .../TextLogFileStructureFinder.java | 125 +- .../TimestampFormatFinder.java | 1699 +++++++++++++---- .../XmlFileStructureFinder.java | 14 +- .../DelimitedFileStructureFinderTests.java | 14 +- .../FileStructureUtilsTests.java | 85 +- .../GrokPatternCreatorTests.java | 188 +- .../TextLogFileStructureFinderTests.java | 227 +-- .../TimestampFormatFinderTests.java | 1293 ++++++++++--- 13 files changed, 2907 insertions(+), 1179 deletions(-) diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index 9c21d2a88b49a..e9d9da479c0f2 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ 
b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -147,57 +147,46 @@ is not compulsory to have a timestamp in the file. -- `timestamp_format`:: - (string) The time format of the timestamp field in the file. + + (string) The Java time format of the timestamp field in the file. + + -- -NOTE: Currently there is a limitation that this format must be one that the -structure finder might choose by itself. The reason for this restriction is that -to consistently set all the fields in the response the structure finder needs a -corresponding Grok pattern name and simple regular expression for each timestamp -format. Therefore, there is little value in specifying this parameter for -structured file formats. If you know which field contains your primary timestamp, -it is as good and less error-prone to just specify `timestamp_field`. - -The valuable use case for this parameter is when the format is semi-structured +NOTE: Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are +supported providing they occur after `ss` and separated from the `ss` by a `.`, +`,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, +newline and carriage return, together with literal text enclosed in single +quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override +format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the file, and you know which format corresponds to the primary timestamp, but you do not want to specify the -full `grok_pattern`. - -If this parameter is not specified, the structure finder chooses the best format from -the formats it knows, which are these Java time formats: - -* `dd/MMM/yyyy:HH:mm:ss XX` -* `EEE MMM dd HH:mm zzz yyyy` -* `EEE MMM dd HH:mm:ss yyyy` -* `EEE MMM dd HH:mm:ss zzz yyyy` -* `EEE MMM dd yyyy HH:mm zzz` -* `EEE MMM dd yyyy HH:mm:ss zzz` -* `EEE, dd MMM yyyy HH:mm XX` -* `EEE, dd MMM yyyy HH:mm XXX` -* `EEE, dd MMM yyyy HH:mm:ss XX` -* `EEE, dd MMM yyyy HH:mm:ss XXX` -* `ISO8601` -* `MMM d HH:mm:ss` -* `MMM d HH:mm:ss,SSS` -* `MMM d yyyy HH:mm:ss` -* `MMM dd HH:mm:ss` -* `MMM dd HH:mm:ss,SSS` -* `MMM dd yyyy HH:mm:ss` -* `MMM dd, yyyy h:mm:ss a` -* `TAI64N` -* `UNIX` -* `UNIX_MS` -* `yyyy-MM-dd HH:mm:ss` -* `yyyy-MM-dd HH:mm:ss,SSS` -* `yyyy-MM-dd HH:mm:ss,SSS XX` -* `yyyy-MM-dd HH:mm:ss,SSSXX` -* `yyyy-MM-dd HH:mm:ss,SSSXXX` -* `yyyy-MM-dd HH:mm:ssXX` -* `yyyy-MM-dd HH:mm:ssXXX` -* `yyyy-MM-dd'T'HH:mm:ss,SSS` -* `yyyy-MM-dd'T'HH:mm:ss,SSSXX` -* `yyyy-MM-dd'T'HH:mm:ss,SSSXXX` -* `yyyyMMddHHmmss` +full `grok_pattern`. Another is when the timestamp format is one that the +structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best +format from a built-in set. 
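+
+For instance, the override format shown above can be sanity-checked with the
+standard `java.time` API. This is an illustrative sketch only, and the sample
+timestamp in it is invented:
+
+[source,java]
+----
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+
+// Literal text is enclosed in single quotes; fractional seconds follow ss after a comma.
+DateTimeFormatter formatter = DateTimeFormatter.ofPattern("MM/dd HH.mm.ss,SSSSSS 'in' yyyy");
+System.out.println(LocalDateTime.parse("05/15 17.14.56,374946 in 2018", formatter));
+// prints: 2018-05-15T17:14:56.374946
+----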
-- @@ -263,8 +252,18 @@ If the request does not encounter errors, you receive the following result: "charset" : "UTF-8", <4> "has_byte_order_marker" : false, <5> "format" : "ndjson", <6> - "need_client_timezone" : false, <7> - "mappings" : { <8> + "timestamp_field" : "release_date", <7> + "joda_timestamp_formats" : [ <8> + "ISO8601" + ], + "java_timestamp_formats" : [ <9> + "ISO8601" + ], + "need_client_timezone" : true, <10> + "mappings" : { <11> + "@timestamp" : { + "type" : "date" + }, "author" : { "type" : "keyword" }, @@ -275,10 +274,25 @@ If the request does not encounter errors, you receive the following result: "type" : "long" }, "release_date" : { - "type" : "keyword" + "type" : "date", + "format" : "iso8601" } }, - "field_stats" : { <9> + "ingest_pipeline" : { + "description" : "Ingest pipeline created by file structure finder", + "processors" : [ + { + "date" : { + "field" : "release_date", + "timezone" : "{{ beat.timezone }}", + "formats" : [ + "ISO8601" + ] + } + } + ] + }, + "field_stats" : { <12> "author" : { "count" : 24, "cardinality" : 20, @@ -484,17 +498,22 @@ If the request does not encounter errors, you receive the following result: <5> For UTF character encodings, `has_byte_order_marker` indicates whether the file begins with a byte order marker. <6> `format` is one of `ndjson`, `xml`, `delimited` or `semi_structured_text`. -<7> If a timestamp format is detected that does not include a timezone, - `need_client_timezone` will be `true`. The server that parses the file must - therefore be told the correct timezone by the client. -<8> `mappings` contains some suitable mappings for an index into which the data - could be ingested. In this case, the `release_date` field has been given a - `keyword` type as it is not considered specific enough to convert to the - `date` type. -<9> `field_stats` contains the most common values of each field, plus basic - numeric statistics for the numeric `page_count` field. This information - may provide clues that the data needs to be cleaned or transformed prior - to use by other {ml} functionality. +<7> The `timestamp_field` names the field considered most likely to be the + primary timestamp of each document. +<8> `joda_timestamp_formats` are used to tell Logstash how to parse timestamps. +<9> `java_timestamp_formats` are the Java time formats recognized in the time + fields. Elasticsearch mappings and Ingest pipeline use this format. +<10> If a timestamp format is detected that does not include a timezone, + `need_client_timezone` will be `true`. The server that parses the file must + therefore be told the correct timezone by the client. +<11> `mappings` contains some suitable mappings for an index into which the data + could be ingested. In this case, the `release_date` field has been given a + `keyword` type as it is not considered specific enough to convert to the + `date` type. +<12> `field_stats` contains the most common values of each field, plus basic + numeric statistics for the numeric `page_count` field. This information + may provide clues that the data needs to be cleaned or transformed prior + to use by other {ml} functionality. The next example shows how it's possible to find the structure of some New York City yellow cab trip data. 
The first `curl` command downloads the data, the @@ -526,7 +545,7 @@ If the request does not encounter errors, you receive the following result: "charset" : "UTF-8", "has_byte_order_marker" : false, "format" : "delimited", <2> - "multiline_start_pattern" : "^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", + "multiline_start_pattern" : "^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "exclude_lines_pattern" : "^\"?VendorID\"?,\"?tpep_pickup_datetime\"?,\"?tpep_dropoff_datetime\"?,\"?passenger_count\"?,\"?trip_distance\"?,\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?,\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", "column_names" : [ <3> "VendorID", @@ -1361,14 +1380,14 @@ this: "charset" : "UTF-8", "has_byte_order_marker" : false, "format" : "semi_structured_text", <1> - "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", <2> + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", <2> "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel}.*", <3> "timestamp_field" : "timestamp", "joda_timestamp_formats" : [ "ISO8601" ], "java_timestamp_formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ], "need_client_timezone" : true, "mappings" : { @@ -1398,7 +1417,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ] } }, @@ -1515,14 +1534,14 @@ this: "charset" : "UTF-8", "has_byte_order_marker" : false, "format" : "semi_structured_text", - "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", + "multiline_start_pattern" : "^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "grok_pattern" : "\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} *\\]\\[%{JAVACLASS:class} *\\] \\[%{HOSTNAME:node}\\] %{JAVALOGMESSAGE:message}", <1> "timestamp_field" : "timestamp", "joda_timestamp_formats" : [ "ISO8601" ], "java_timestamp_formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ], "need_client_timezone" : true, "mappings" : { @@ -1558,7 +1577,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "yyyy-MM-dd'T'HH:mm:ss,SSS" + "ISO8601" ] } }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index dd30c0a1f94bc..aa88905962638 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.supercsv.exception.SuperCsvException; import org.supercsv.io.CsvListReader; import org.supercsv.prefs.CsvPreference; @@ -27,7 +26,6 @@ import java.util.Map; import java.util.Random; import java.util.SortedMap; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -62,7 +60,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List 
(field == null) ? null : field.trim()).collect(Collectors.toList()) : row); sampleRecords.add(sampleRecord); sampleMessages.add( - sampleLines.subList(prevMessageEndLineNumber + 1, lineNumbers.get(index)).stream().collect(Collectors.joining("\n"))); + String.join("\n", sampleLines.subList(prevMessageEndLineNumber + 1, lineNumbers.get(index)))); prevMessageEndLineNumber = lineNumber; } - String preamble = Pattern.compile("\n").splitAsStream(sample).limit(lineNumbers.get(1)).collect(Collectors.joining("\n", "", "\n")); + String preamble = String.join("\n", sampleLines.subList(0, lineNumbers.get(1))) + "\n"; + + // null to allow GC before timestamp search + sampleLines = null; char delimiter = (char) csvPreference.getDelimiterChar(); FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.DELIMITED) @@ -107,7 +108,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, timeoutChecker); if (timeField != null) { String timeLineRegex = null; @@ -119,7 +120,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(Listnull if + * @return A tuple of (field name, timestamp format finder) if one can be found, or null if * there is no consistent timestamp. */ - static Tuple guessTimestampField(List explanation, List> sampleRecords, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + static Tuple guessTimestampField(List explanation, List> sampleRecords, + FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { if (sampleRecords.isEmpty()) { return null; } + StringBuilder exceptionMsg = null; + // Accept the first match from the first sample that is compatible with all the other samples - for (Tuple candidate : findCandidates(explanation, sampleRecords, overrides, timeoutChecker)) { + for (Tuple candidate : findCandidates(explanation, sampleRecords, overrides, timeoutChecker)) { + + String fieldName = candidate.v1(); + TimestampFormatFinder timestampFormatFinder = candidate.v2(); boolean allGood = true; for (Map sampleRecord : sampleRecords.subList(1, sampleRecords.size())) { - Object fieldValue = sampleRecord.get(candidate.v1()); + Object fieldValue = sampleRecord.get(fieldName); if (fieldValue == null) { if (overrides.getTimestampField() != null) { throw new IllegalArgumentException("Specified timestamp field [" + overrides.getTimestampField() + "] is not present in record [" + sampleRecord + "]"); } - explanation.add("First sample match [" + candidate.v1() + "] ruled out because record [" + sampleRecord + + explanation.add("First sample match [" + fieldName + "] ruled out because record [" + sampleRecord + "] doesn't have field"); allGood = false; break; @@ -88,15 +91,20 @@ static Tuple guessTimestampField(List explanatio timeoutChecker.check("timestamp field determination"); - TimestampMatch match = TimestampFormatFinder.findFirstFullMatch(fieldValue.toString(), overrides.getTimestampFormat(), - timeoutChecker); - if (match == null || match.candidateIndex != candidate.v2().candidateIndex) { + try { + timestampFormatFinder.addSample(fieldValue.toString()); + } catch (IllegalArgumentException e) { if (overrides.getTimestampFormat() != null) { - throw new IllegalArgumentException("Specified timestamp format [" + overrides.getTimestampFormat() + - "] does not match for record [" + sampleRecord + "]"); + if 
(exceptionMsg == null) { + exceptionMsg = new StringBuilder("Specified timestamp format [" + overrides.getTimestampFormat() + + "] does not match"); + } else { + exceptionMsg.append(", nor"); + } + exceptionMsg.append(" for record [").append(sampleRecord).append("] in field [").append(fieldName).append("]"); } - explanation.add("First sample match [" + candidate.v1() + "] ruled out because record [" + sampleRecord + - "] matches differently: [" + match + "]"); + explanation.add("First sample match " + timestampFormatFinder.getRawJavaTimestampFormats() + + " ruled out because record [" + sampleRecord + "] does not match"); allGood = false; break; } @@ -104,16 +112,21 @@ static Tuple guessTimestampField(List explanatio if (allGood) { explanation.add(((overrides.getTimestampField() == null) ? "Guessing timestamp" : "Timestamp") + - " field is [" + candidate.v1() + "] with format [" + candidate.v2() + "]"); + " field is [" + fieldName + "] with format " + timestampFormatFinder.getJavaTimestampFormats()); return candidate; } } + if (exceptionMsg != null) { + throw new IllegalArgumentException(exceptionMsg.toString()); + } + return null; } - private static List> findCandidates(List explanation, List> sampleRecords, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + private static List> findCandidates(List explanation, List> sampleRecords, + FileStructureOverrides overrides, + TimeoutChecker timeoutChecker) { assert sampleRecords.isEmpty() == false; Map firstRecord = sampleRecords.get(0); @@ -124,7 +137,7 @@ private static List> findCandidates(List e "] is not present in record [" + firstRecord + "]"); } - List> candidates = new ArrayList<>(); + List> candidates = new ArrayList<>(); // Get candidate timestamps from the possible field(s) of the first sample record for (Map.Entry field : firstRecord.entrySet()) { @@ -132,12 +145,17 @@ private static List> findCandidates(List e if (onlyConsiderField == null || onlyConsiderField.equals(fieldName)) { Object value = field.getValue(); if (value != null) { - TimestampMatch match = TimestampFormatFinder.findFirstFullMatch(value.toString(), overrides.getTimestampFormat(), - timeoutChecker); - if (match != null) { - Tuple candidate = new Tuple<>(fieldName, match); - candidates.add(candidate); - explanation.add("First sample timestamp match [" + candidate + "]"); + // Construct the TimestampFormatFinder outside the no-op catch because an exception + // from the constructor indicates a problem with the overridden format + TimestampFormatFinder timestampFormatFinder = + new TimestampFormatFinder(explanation, overrides.getTimestampFormat(), true, true, true, timeoutChecker); + try { + timestampFormatFinder.addSample(value.toString()); + candidates.add(new Tuple<>(fieldName, timestampFormatFinder)); + explanation.add("First sample timestamp match " + timestampFormatFinder.getRawJavaTimestampFormats() + + " for field [" + fieldName + "]"); + } catch (IllegalArgumentException e) { + // No possible timestamp format found in this particular field - not a problem } } } @@ -231,6 +249,27 @@ private static Stream flatten(Object value) { } } + /** + * Finds the appropriate date mapping for a collection of field values. Throws + * {@link IllegalArgumentException} if no consistent date mapping can be found. + * @param explanation List of reasons for choosing the overall file structure. This list + * may be non-empty when the method is called, and this method may + * append to it. 
+ * @param fieldValues Values of the field for which mappings are to be guessed. The guessed + * mapping will be compatible with all the provided values. Must not be + * empty. + * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * @return The sub-section of the index mappings most appropriate for the field. + */ + static Map findTimestampMapping(List explanation, Collection fieldValues, + TimeoutChecker timeoutChecker) { + assert fieldValues.isEmpty() == false; + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, timeoutChecker); + fieldValues.forEach(timestampFormatFinder::addSample); + return timestampFormatFinder.getEsDateMappingTypeWithFormat(); + } + /** * Given some sample values for a field, guess the most appropriate index mapping for the * field. @@ -247,26 +286,17 @@ private static Stream flatten(Object value) { */ static Map guessScalarMapping(List explanation, String fieldName, Collection fieldValues, TimeoutChecker timeoutChecker) { - assert fieldValues.isEmpty() == false; if (fieldValues.stream().allMatch(value -> "true".equals(value) || "false".equals(value))) { return Collections.singletonMap(MAPPING_TYPE_SETTING, "boolean"); } - // This checks if a date mapping would be appropriate, and, if so, finds the correct format - Iterator iter = fieldValues.iterator(); - TimestampMatch timestampMatch = TimestampFormatFinder.findFirstFullMatch(iter.next(), timeoutChecker); - while (timestampMatch != null && iter.hasNext()) { - // To be mapped as type date all the values must match the same timestamp format - it is - // not acceptable for all values to be dates, but with different formats - if (timestampMatch.equals(TimestampFormatFinder.findFirstFullMatch(iter.next(), timestampMatch.candidateIndex, - timeoutChecker)) == false) { - timestampMatch = null; - } - } - if (timestampMatch != null) { - return timestampMatch.getEsDateMappingTypeWithFormat(); + try { + return findTimestampMapping(explanation, fieldValues, timeoutChecker); + } catch (IllegalArgumentException e) { + // To be mapped as type "date" all the values must match the same timestamp format - if + // they don't we'll end up here, and move on to try other possible mappings } if (fieldValues.stream().allMatch(NUMBER_GROK::match)) { @@ -321,6 +351,7 @@ static boolean isMoreLikelyTextThanKeyword(String str) { * Create an ingest pipeline definition appropriate for the file structure. * @param grokPattern The Grok pattern used for parsing semi-structured text formats. null for * fully structured formats. + * @param customGrokPatternDefinitions The definitions for any custom patterns that {@code grokPattern} uses. * @param timestampField The input field containing the timestamp to be parsed into @timestamp. * null if there is no timestamp. * @param timestampFormats Timestamp formats to be used for parsing {@code timestampField}. @@ -328,7 +359,8 @@ static boolean isMoreLikelyTextThanKeyword(String str) { * @param needClientTimezone Is the timezone of the client supplying data to ingest required to uniquely parse the timestamp? * @return The ingest pipeline definition, or null if none is required. 
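The guessScalarMapping change above leans on findTimestampMapping throwing IllegalArgumentException when the sampled values do not all share one timestamp format. A minimal sketch of that contract, assuming a TimeoutChecker named timeoutChecker is in scope and using hypothetical sample values:

    List<String> explanation = new ArrayList<>();
    Collection<String> consistent = Arrays.asList("2019-04-10T11:52:09", "2019-04-11T02:13:44");
    // Both values parse with the same format, so a "date" mapping (with its format) is returned
    Map<String, String> mapping = FileStructureUtils.findTimestampMapping(explanation, consistent, timeoutChecker);

    Collection<String> mixed = Arrays.asList("2019-04-10T11:52:09", "10/Apr/2019:11:52:09 +0000");
    try {
        FileStructureUtils.findTimestampMapping(explanation, mixed, timeoutChecker);
    } catch (IllegalArgumentException e) {
        // Inconsistent formats: guessScalarMapping catches this and falls through
        // to the numeric and keyword/text checks instead of mapping the field as "date"
    }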
*/ - public static Map makeIngestPipelineDefinition(String grokPattern, String timestampField, List timestampFormats, + public static Map makeIngestPipelineDefinition(String grokPattern, Map customGrokPatternDefinitions, + String timestampField, List timestampFormats, boolean needClientTimezone) { if (grokPattern == null && timestampField == null) { @@ -344,7 +376,12 @@ public static Map makeIngestPipelineDefinition(String grokPatter Map grokProcessorSettings = new LinkedHashMap<>(); grokProcessorSettings.put("field", "message"); grokProcessorSettings.put("patterns", Collections.singletonList(grokPattern)); + if (customGrokPatternDefinitions.isEmpty() == false) { + grokProcessorSettings.put("pattern_definitions", customGrokPatternDefinitions); + } processors.add(Collections.singletonMap("grok", grokProcessorSettings)); + } else { + assert customGrokPatternDefinitions.isEmpty(); } if (timestampField != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java index 6620afcb7145b..7a5c9a48f8757 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreator.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Arrays; @@ -18,6 +17,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -76,10 +76,12 @@ public final class GrokPatternCreator { new ValueOnlyGrokPatternCandidate("DATESTAMP_RFC2822", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("DATESTAMP_OTHER", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("DATESTAMP_EVENTLOG", "date", "extra_timestamp"), + new ValueOnlyGrokPatternCandidate("HTTPDERROR_DATE", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("SYSLOGTIMESTAMP", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("HTTPDATE", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("CATALINA_DATESTAMP", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("CISCOTIMESTAMP", "date", "extra_timestamp"), + new ValueOnlyGrokPatternCandidate("DATESTAMP", "date", "extra_timestamp"), new ValueOnlyGrokPatternCandidate("LOGLEVEL", "keyword", "loglevel"), new ValueOnlyGrokPatternCandidate("URI", "keyword", "uri"), new ValueOnlyGrokPatternCandidate("UUID", "keyword", "uuid"), @@ -90,7 +92,8 @@ public final class GrokPatternCreator { // TODO: would be nice to have IPORHOST here, but HOSTNAME matches almost all words new ValueOnlyGrokPatternCandidate("IP", "ip", "ipaddress"), new ValueOnlyGrokPatternCandidate("DATE", "date", "date"), - new ValueOnlyGrokPatternCandidate("TIME", "date", "time"), + // A time with no date cannot be stored in a field of type "date", hence "keyword" + new ValueOnlyGrokPatternCandidate("TIME", "keyword", "time"), // This already includes pre/post break conditions new ValueOnlyGrokPatternCandidate("QUOTEDSTRING", "keyword", "field", "", ""), // Disallow +, - and . 
before numbers, as well as "word" characters, otherwise we'll pick @@ -121,6 +124,7 @@ public final class GrokPatternCreator { */ private final Map mappings; private final Map fieldStats; + private final Map grokPatternDefinitions; private final Map fieldNameCountStore = new HashMap<>(); private final StringBuilder overallGrokPatternBuilder = new StringBuilder(); private final TimeoutChecker timeoutChecker; @@ -131,16 +135,24 @@ public final class GrokPatternCreator { * can be appended by the methods of this class. * @param sampleMessages Sample messages that any Grok pattern found must match. * @param mappings Will be updated with mappings appropriate for the returned pattern, if non-null. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. * @param fieldStats Will be updated with field stats for the fields in the returned pattern, if non-null. + * @param customGrokPatternDefinitions Custom Grok pattern definitions to add to the built-in ones. + * @param timeoutChecker Will abort the operation if its timeout is exceeded. */ public GrokPatternCreator(List explanation, Collection sampleMessages, Map mappings, - Map fieldStats, TimeoutChecker timeoutChecker) { - this.explanation = explanation; + Map fieldStats, Map customGrokPatternDefinitions, + TimeoutChecker timeoutChecker) { + this.explanation = Objects.requireNonNull(explanation); this.sampleMessages = Collections.unmodifiableCollection(sampleMessages); this.mappings = mappings; this.fieldStats = fieldStats; - this.timeoutChecker = timeoutChecker; + if (customGrokPatternDefinitions.isEmpty()) { + grokPatternDefinitions = Grok.getBuiltinPatterns(); + } else { + grokPatternDefinitions = new HashMap<>(Grok.getBuiltinPatterns()); + grokPatternDefinitions.putAll(customGrokPatternDefinitions); + } + this.timeoutChecker = Objects.requireNonNull(timeoutChecker); } /** @@ -171,7 +183,8 @@ public Tuple findFullLineGrokPattern(String timestampField) { */ public void validateFullLineGrokPattern(String grokPattern, String timestampField) { - FullMatchGrokPatternCandidate candidate = FullMatchGrokPatternCandidate.fromGrokPattern(grokPattern, timestampField); + FullMatchGrokPatternCandidate candidate = FullMatchGrokPatternCandidate.fromGrokPattern(grokPattern, timestampField, + grokPatternDefinitions); if (candidate.matchesAll(sampleMessages, timeoutChecker)) { candidate.processMatch(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); } else { @@ -189,7 +202,7 @@ public String createGrokPatternFromExamples(String seedPatternName, String seedF overallGrokPatternBuilder.setLength(0); - GrokPatternCandidate seedCandidate = new NoMappingGrokPatternCandidate(seedPatternName, seedFieldName); + GrokPatternCandidate seedCandidate = new NoMappingGrokPatternCandidate(seedPatternName, seedFieldName, grokPatternDefinitions); processCandidateAndSplit(seedCandidate, true, sampleMessages, false, 0, false, 0); @@ -215,8 +228,8 @@ private void processCandidateAndSplit(GrokPatternCandidate chosenPattern, boolea Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - String patternBuilderContent = - chosenPattern.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, mappings, fieldStats, timeoutChecker); + String patternBuilderContent = chosenPattern.processCaptures(explanation, fieldNameCountStore, snippets, prefaces, epilogues, + mappings, fieldStats, timeoutChecker); appendBestGrokMatchForStrings(false, prefaces, ignoreKeyValueCandidateLeft, ignoreValueOnlyCandidatesLeft); 
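The GrokPatternCreator constructor above copies the built-in Grok pattern map only when there is actually something to add. A condensed sketch of that merge, with a hypothetical custom pattern name:

    Map<String, String> custom =
        Collections.singletonMap("CUSTOM_TIMESTAMP", "%{YEAR}-%{MONTHNUM2}-%{MONTHDAY}");
    Map<String, String> definitions;
    if (custom.isEmpty()) {
        definitions = Grok.getBuiltinPatterns();   // common case: no copy needed
    } else {
        definitions = new HashMap<>(Grok.getBuiltinPatterns());
        definitions.putAll(custom);                // custom definitions shadow built-ins of the same name
    }
    // Grok objects built from this map can then reference the custom pattern
    Grok grok = new Grok(definitions, "%{CUSTOM_TIMESTAMP:timestamp}", TimeoutChecker.watchdog);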
overallGrokPatternBuilder.append(patternBuilderContent); appendBestGrokMatchForStrings(isLast, epilogues, ignoreKeyValueCandidateRight, ignoreValueOnlyCandidatesRight); @@ -234,7 +247,7 @@ void appendBestGrokMatchForStrings(boolean isLast, Collection snippets, GrokPatternCandidate bestCandidate = null; if (snippets.isEmpty() == false) { - GrokPatternCandidate kvCandidate = new KeyValueGrokPatternCandidate(explanation); + GrokPatternCandidate kvCandidate = new KeyValueGrokPatternCandidate(); if (ignoreKeyValueCandidate == false && kvCandidate.matchesAll(snippets)) { bestCandidate = kvCandidate; } else { @@ -409,9 +422,9 @@ interface GrokPatternCandidate { * calculate field stats. * @return The string that needs to be incorporated into the overall Grok pattern for the line. */ - String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker); + String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker); } /** @@ -434,10 +447,22 @@ static class ValueOnlyGrokPatternCandidate implements GrokPatternCandidate { * for the pre and/or post breaks. * * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param mappingType Data type for field in Elasticsearch mappings. * @param fieldName Name of the field to extract from the match. */ ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName) { - this(grokPatternName, mappingType, fieldName, "\\b", "\\b"); + this(grokPatternName, mappingType, fieldName, "\\b", "\\b", Grok.getBuiltinPatterns()); + } + + /** + * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param mappingType Data type for field in Elasticsearch mappings. + * @param fieldName Name of the field to extract from the match. + * @param grokPatternDefinitions Definitions of Grok patterns to be used. + */ + ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, + Map grokPatternDefinitions) { + this(grokPatternName, mappingType, fieldName, "\\b", "\\b", grokPatternDefinitions); } /** @@ -448,11 +473,24 @@ static class ValueOnlyGrokPatternCandidate implements GrokPatternCandidate { * @param postBreak Only consider the match if it's broken from the following text by this. */ ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, String preBreak, String postBreak) { + this(grokPatternName, mappingType, fieldName, preBreak, postBreak, Grok.getBuiltinPatterns()); + } + + /** + * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param mappingType Data type for field in Elasticsearch mappings. + * @param fieldName Name of the field to extract from the match. + * @param preBreak Only consider the match if it's broken from the previous text by this. + * @param postBreak Only consider the match if it's broken from the following text by this. + * @param grokPatternDefinitions Definitions of Grok patterns to be used. 
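All the ValueOnlyGrokPatternCandidate constructors funnel into the same idea: bracket the target pattern with preface and epilogue captures so the surrounding text can be split off and searched recursively. A stripped-down illustration of the expression the constructor builds, assuming the PREFACE, VALUE and EPILOGUE constants hold the capture names shown:

    String grokPatternName = "TIMESTAMP_ISO8601";
    String preBreak = "\\b";
    String postBreak = "\\b";
    // (?m) has the Ruby meaning here, equivalent to (?s) in Java
    Grok grok = new Grok(Grok.getBuiltinPatterns(),
        "(?m)%{DATA:preface}" + preBreak + "%{" + grokPatternName + ":value}" + postBreak + "%{GREEDYDATA:epilogue}",
        TimeoutChecker.watchdog);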
+ */ + ValueOnlyGrokPatternCandidate(String grokPatternName, String mappingType, String fieldName, String preBreak, String postBreak, + Map grokPatternDefinitions) { this.grokPatternName = grokPatternName; this.mappingType = mappingType; this.fieldName = fieldName; // The (?m) here has the Ruby meaning, which is equivalent to (?s) in Java - grok = new Grok(Grok.getBuiltinPatterns(), "(?m)%{DATA:" + PREFACE + "}" + preBreak + + grok = new Grok(grokPatternDefinitions, "(?m)%{DATA:" + PREFACE + "}" + preBreak + "%{" + grokPatternName + ":" + VALUE + "}" + postBreak + "%{GREEDYDATA:" + EPILOGUE + "}", TimeoutChecker.watchdog); } @@ -467,9 +505,9 @@ public boolean matchesAll(Collection snippets) { * bit that matches. */ @Override - public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker) { + public String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker) { Collection values = new ArrayList<>(); for (String snippet : snippets) { Map captures = timeoutChecker.grokCaptures(grok, snippet, "full message Grok pattern field extraction"); @@ -485,10 +523,13 @@ public String processCaptures(Map fieldNameCountStore, Collecti if (mappings != null) { Map fullMappingType = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, mappingType); if ("date".equals(mappingType)) { - assert values.isEmpty() == false; - TimestampMatch timestampMatch = TimestampFormatFinder.findFirstFullMatch(values.iterator().next(), timeoutChecker); - if (timestampMatch != null) { - fullMappingType = timestampMatch.getEsDateMappingTypeWithFormat(); + try { + fullMappingType = FileStructureUtils.findTimestampMapping(explanation, values, timeoutChecker); + } catch (IllegalArgumentException e) { + // This feels like it shouldn't happen, but there may be some obscure edge case + // where it does, and in production it will cause less frustration to just return + // a mapping type of "date" with no format than to fail the whole analysis + assert e == null : e.getMessage(); } timeoutChecker.check("mapping determination"); } @@ -509,13 +550,9 @@ public String processCaptures(Map fieldNameCountStore, Collecti */ static class KeyValueGrokPatternCandidate implements GrokPatternCandidate { - private static final Pattern kvFinder = Pattern.compile("\\b(\\w+)=[\\w.-]+"); - private final List explanation; - private String fieldName; + private static final Pattern KV_FINDER = Pattern.compile("\\b(\\w+)=[\\w.-]+"); - KeyValueGrokPatternCandidate(List explanation) { - this.explanation = explanation; - } + private String fieldName; @Override public boolean matchesAll(Collection snippets) { @@ -523,7 +560,7 @@ public boolean matchesAll(Collection snippets) { boolean isFirst = true; for (String snippet : snippets) { if (isFirst) { - Matcher matcher = kvFinder.matcher(snippet); + Matcher matcher = KV_FINDER.matcher(snippet); while (matcher.find()) { candidateNames.add(matcher.group(1)); } @@ -540,9 +577,9 @@ public boolean matchesAll(Collection snippets) { } @Override - public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker) { + public String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection 
epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker) { if (fieldName == null) { throw new IllegalStateException("Cannot process KV matches until a field name has been determined"); } @@ -578,15 +615,15 @@ public String processCaptures(Map fieldNameCountStore, Collecti */ static class NoMappingGrokPatternCandidate extends ValueOnlyGrokPatternCandidate { - NoMappingGrokPatternCandidate(String grokPatternName, String fieldName) { - super(grokPatternName, null, fieldName); + NoMappingGrokPatternCandidate(String grokPatternName, String fieldName, Map grokPatternDefinitions) { + super(grokPatternName, null, fieldName, grokPatternDefinitions); } @Override - public String processCaptures(Map fieldNameCountStore, Collection snippets, Collection prefaces, - Collection epilogues, Map mappings, Map fieldStats, - TimeoutChecker timeoutChecker) { - return super.processCaptures(fieldNameCountStore, snippets, prefaces, epilogues, null, fieldStats, timeoutChecker); + public String processCaptures(List explanation, Map fieldNameCountStore, Collection snippets, + Collection prefaces, Collection epilogues, Map mappings, + Map fieldStats, TimeoutChecker timeoutChecker) { + return super.processCaptures(explanation, fieldNameCountStore, snippets, prefaces, epilogues, null, fieldStats, timeoutChecker); } } @@ -600,17 +637,27 @@ static class FullMatchGrokPatternCandidate { private final Grok grok; static FullMatchGrokPatternCandidate fromGrokPatternName(String grokPatternName, String timeField) { - return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField); + return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField, Grok.getBuiltinPatterns()); + } + + static FullMatchGrokPatternCandidate fromGrokPatternName(String grokPatternName, String timeField, + Map grokPatternDefinitions) { + return new FullMatchGrokPatternCandidate("%{" + grokPatternName + "}", timeField, grokPatternDefinitions); } static FullMatchGrokPatternCandidate fromGrokPattern(String grokPattern, String timeField) { - return new FullMatchGrokPatternCandidate(grokPattern, timeField); + return new FullMatchGrokPatternCandidate(grokPattern, timeField, Grok.getBuiltinPatterns()); + } + + static FullMatchGrokPatternCandidate fromGrokPattern(String grokPattern, String timeField, + Map grokPatternDefinitions) { + return new FullMatchGrokPatternCandidate(grokPattern, timeField, grokPatternDefinitions); } - private FullMatchGrokPatternCandidate(String grokPattern, String timeField) { + private FullMatchGrokPatternCandidate(String grokPattern, String timeField, Map grokPatternDefinitions) { this.grokPattern = grokPattern; this.timeField = timeField; - grok = new Grok(Grok.getBuiltinPatterns(), grokPattern, TimeoutChecker.watchdog); + grok = new Grok(grokPatternDefinitions, grokPattern, TimeoutChecker.watchdog); } public String getTimeField() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java index 33d9ba56b3f53..116de8f7679d2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import 
org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.io.IOException; import java.util.ArrayList; @@ -53,17 +52,17 @@ static NdJsonFileStructureFinder makeNdJsonFileStructureFinder(List expl .setNumLinesAnalyzed(sampleMessages.size()) .setNumMessagesAnalyzed(sampleRecords.size()); - Tuple timeField = + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, timeoutChecker); if (timeField != null) { boolean needClientTimeZone = timeField.v2().hasTimezoneDependentParsing(); structureBuilder.setTimestampField(timeField.v1()) - .setJodaTimestampFormats(timeField.v2().jodaTimestampFormats) - .setJavaTimestampFormats(timeField.v2().javaTimestampFormats) + .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) + .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, timeField.v1(), - timeField.v2().javaTimestampFormats, needClientTimeZone)); + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timeField.v1(), + timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index 36e5e91b4326b..d07eea15f973f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -8,16 +8,12 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.regex.Pattern; @@ -30,25 +26,33 @@ public class TextLogFileStructureFinder implements FileStructureFinder { static TextLogFileStructureFinder makeTextLogFileStructureFinder(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { - String[] sampleLines = sample.split("\n"); - Tuple> bestTimestamp = mostLikelyTimestamp(sampleLines, overrides, timeoutChecker); - if (bestTimestamp == null) { - // Is it appropriate to treat a file that is neither structured nor has - // a regular pattern of timestamps as a log file? Probably not... - throw new IllegalArgumentException("Could not find " + - ((overrides.getTimestampFormat() == null) ? 
"a timestamp" : "the specified timestamp format") + " in the sample provided"); + TimestampFormatFinder timestampFormatFinder = populateTimestampFormatFinder(explanation, sampleLines, overrides, timeoutChecker); + switch (timestampFormatFinder.getNumMatchedFormats()) { + case 0: + // Is it appropriate to treat a file that is neither structured nor has + // a regular pattern of timestamps as a log file? Probably not... + throw new IllegalArgumentException("Could not find " + ((overrides.getTimestampFormat() == null) + ? "a timestamp" + : "the specified timestamp format") + " in the sample provided"); + case 1: + // Simple case + break; + default: + timestampFormatFinder.selectBestMatch(); + break; } - explanation.add(((overrides.getTimestampFormat() == null) ? "Most likely timestamp" : "Timestamp") + " format is [" + - bestTimestamp.v1() + "]"); + explanation.add(((overrides.getTimestampFormat() == null) ? "Most likely timestamp" : "Timestamp") + " format is " + + timestampFormatFinder.getJavaTimestampFormats()); List sampleMessages = new ArrayList<>(); StringBuilder preamble = new StringBuilder(); int linesConsumed = 0; StringBuilder message = null; int linesInMessage = 0; - String multiLineRegex = createMultiLineMessageStartRegex(bestTimestamp.v2(), bestTimestamp.v1().simplePattern.pattern()); + String multiLineRegex = createMultiLineMessageStartRegex(timestampFormatFinder.getPrefaces(), + timestampFormatFinder.getSimplePattern().pattern()); Pattern multiLinePattern = Pattern.compile(multiLineRegex); for (String sampleLine : sampleLines) { if (multiLinePattern.matcher(sampleLine).find()) { @@ -82,6 +86,9 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex + "problem is probably that the primary timestamp format has been incorrectly detected, so try overriding it."); } + // null to allow GC before Grok pattern search + sampleLines = null; + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.SEMI_STRUCTURED_TEXT) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) @@ -97,7 +104,9 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex SortedMap fieldStats = new TreeMap<>(); fieldStats.put("message", FileStructureUtils.calculateFieldStats(sampleMessages, timeoutChecker)); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); + Map customGrokPatternDefinitions = timestampFormatFinder.getCustomGrokPatternDefinitions(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, fieldStats, + customGrokPatternDefinitions, timeoutChecker); // We can't parse directly into @timestamp using Grok, so parse to some other time field, which the date filter will then remove String interimTimestampField = overrides.getTimestampField(); String grokPattern = overrides.getGrokPattern(); @@ -116,20 +125,22 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex if (interimTimestampField == null) { interimTimestampField = "timestamp"; } - grokPattern = grokPatternCreator.createGrokPatternFromExamples(bestTimestamp.v1().grokPatternName, interimTimestampField); + grokPattern = + grokPatternCreator.createGrokPatternFromExamples(timestampFormatFinder.getGrokPatternName(), interimTimestampField); } } - boolean needClientTimeZone = bestTimestamp.v1().hasTimezoneDependentParsing(); + boolean needClientTimeZone = timestampFormatFinder.hasTimezoneDependentParsing(); FileStructure 
structure = structureBuilder .setTimestampField(interimTimestampField) - .setJodaTimestampFormats(bestTimestamp.v1().jodaTimestampFormats) - .setJavaTimestampFormats(bestTimestamp.v1().javaTimestampFormats) + .setJodaTimestampFormats(timestampFormatFinder.getJodaTimestampFormats()) + .setJavaTimestampFormats(timestampFormatFinder.getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) .setGrokPattern(grokPattern) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, interimTimestampField, - bestTimestamp.v1().javaTimestampFormats, needClientTimeZone)) + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, + customGrokPatternDefinitions, interimTimestampField, + timestampFormatFinder.getJavaTimestampFormats(), needClientTimeZone)) .setMappings(mappings) .setFieldStats(fieldStats) .setExplanation(explanation) @@ -153,79 +164,23 @@ public FileStructure getStructure() { return structure; } - static Tuple> mostLikelyTimestamp(String[] sampleLines, FileStructureOverrides overrides, - TimeoutChecker timeoutChecker) { + static TimestampFormatFinder populateTimestampFormatFinder(List explanation, String[] sampleLines, + FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + TimestampFormatFinder timestampFormatFinder = + new TimestampFormatFinder(explanation, overrides.getTimestampFormat(), false, false, false, timeoutChecker); - Map>> timestampMatches = new LinkedHashMap<>(); - - int remainingLines = sampleLines.length; - double differenceBetweenTwoHighestWeights = 0.0; for (String sampleLine : sampleLines) { - TimestampMatch match = TimestampFormatFinder.findFirstMatch(sampleLine, overrides.getTimestampFormat(), timeoutChecker); - if (match != null) { - TimestampMatch pureMatch = new TimestampMatch(match.candidateIndex, "", match.jodaTimestampFormats, - match.javaTimestampFormats, match.simplePattern, match.grokPatternName, ""); - timestampMatches.compute(pureMatch, (k, v) -> { - if (v == null) { - return new Tuple<>(weightForMatch(match.preface), new HashSet<>(Collections.singletonList(match.preface))); - } else { - v.v2().add(match.preface); - return new Tuple<>(v.v1() + weightForMatch(match.preface), v.v2()); - } - }); - differenceBetweenTwoHighestWeights = findDifferenceBetweenTwoHighestWeights(timestampMatches.values()); - } - timeoutChecker.check("timestamp format determination"); - // The highest possible weight is 1, so if the difference between the two highest weights - // is less than the number of lines remaining then the leader cannot possibly be overtaken - if (differenceBetweenTwoHighestWeights > --remainingLines) { - break; - } + timestampFormatFinder.addSample(sampleLine); } - double highestWeight = 0.0; - Tuple> highestWeightMatch = null; - for (Map.Entry>> entry : timestampMatches.entrySet()) { - double weight = entry.getValue().v1(); - if (weight > highestWeight) { - highestWeight = weight; - highestWeightMatch = new Tuple<>(entry.getKey(), entry.getValue().v2()); - } - } - return highestWeightMatch; - } - - /** - * Used to weight a timestamp match according to how far along the line it is found. - * Timestamps at the very beginning of the line are given a weight of 1. The weight - * progressively decreases the more text there is preceding the timestamp match, but - * is always greater than 0. - * @return A weight in the range (0, 1]. 
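For reference, the weighting formula removed just below decays with the length of the preface, i.e. the text preceding the timestamp match. A quick check of its behaviour (values rounded):

    // weight(preface) = (1 + length/15)^-1.1
    Math.pow(1.0 + 0.0 / 15.0, -1.1);   // length 0  -> 1.000 (timestamp at start of line)
    Math.pow(1.0 + 15.0 / 15.0, -1.1);  // length 15 -> ~0.466
    Math.pow(1.0 + 45.0 / 15.0, -1.1);  // length 45 -> ~0.218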
- */ - private static double weightForMatch(String preface) { - return Math.pow(1.0 + preface.length() / 15.0, -1.1); - } - - private static double findDifferenceBetweenTwoHighestWeights(Collection>> timestampMatches) { - double highestWeight = 0.0; - double secondHighestWeight = 0.0; - for (Tuple> timestampMatch : timestampMatches) { - double weight = timestampMatch.v1(); - if (weight > highestWeight) { - secondHighestWeight = highestWeight; - highestWeight = weight; - } else if (weight > secondHighestWeight) { - secondHighestWeight = weight; - } - } - return highestWeight - secondHighestWeight; + return timestampFormatFinder; } - static String createMultiLineMessageStartRegex(Collection prefaces, String timestampRegex) { + static String createMultiLineMessageStartRegex(Collection prefaces, String simpleDateRegex) { StringBuilder builder = new StringBuilder("^"); GrokPatternCreator.addIntermediateRegex(builder, prefaces); - builder.append(timestampRegex); + builder.append(simpleDateRegex); if (builder.substring(0, 3).equals("^\\b")) { builder.delete(1, 3); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index c19a93a7be99e..0283437d64808 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -5,56 +5,106 @@ */ package org.elasticsearch.xpack.ml.filestructurefinder; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.grok.Grok; +import java.time.DateTimeException; +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.FormatStyle; +import java.time.format.ResolverStyle; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Used to find the best timestamp format for one of the following situations: * 1. Matching an entire field value * 2. Matching a timestamp found somewhere within a message + * + * This class is not thread safe. Each object of this class should only be used from within a single thread. 
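To make the single-threaded, stateful usage concrete, here is a minimal sketch of the add-samples-then-query flow that populateTimestampFormatFinder (above) drives; the sample lines are hypothetical and explanation and timeoutChecker are assumed to be in scope:

    TimestampFormatFinder finder =
        new TimestampFormatFinder(explanation, null, false, false, false, timeoutChecker);
    finder.addSample("[2019-04-10T11:52:09,123][INFO ][o.e.n.Node] starting up");
    finder.addSample("[2019-04-10T11:52:10,456][WARN ][o.e.n.Node] something happened");
    if (finder.getNumMatchedFormats() > 1) {
        finder.selectBestMatch();   // collapse to the single most plausible format
    }
    List<String> javaFormats = finder.getJavaTimestampFormats();
    String grokPatternName = finder.getGrokPatternName();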
 */
 public final class TimestampFormatFinder {
 
     private static final String PREFACE = "preface";
     private static final String EPILOGUE = "epilogue";
 
+    private static final String PUNCTUATION_THAT_NEEDS_ESCAPING_IN_REGEX = "\\|()[]{}^$.*?";
     private static final String FRACTIONAL_SECOND_SEPARATORS = ":.,";
-    private static final Pattern FRACTIONAL_SECOND_INTERPRETER = Pattern.compile("([" + FRACTIONAL_SECOND_SEPARATORS + "])(\\d{3,9})");
-    private static final char DEFAULT_FRACTIONAL_SECOND_SEPARATOR = ',';
-    private static final Pattern FRACTIONAL_SECOND_TIMESTAMP_FORMAT_PATTERN =
-        Pattern.compile("([" + FRACTIONAL_SECOND_SEPARATORS + "]S{3,9})");
-    private static final String DEFAULT_FRACTIONAL_SECOND_FORMAT = DEFAULT_FRACTIONAL_SECOND_SEPARATOR + "SSS";
-
-    /**
-     * The timestamp patterns are complex and it can be slow to prove they do not
-     * match anywhere in a long message. Many of the timestamps are similar and
-     * will never be found in a string if simpler sub-patterns do not exist in the
-     * string. These sub-patterns can be used to quickly rule out multiple complex
-     * patterns. These patterns do not need to represent quantities that are
-     * useful to know the value of, merely character sequences that can be used to
-     * prove that several more complex patterns cannot possibly match.
-     */
-    private static final List<Pattern> QUICK_RULE_OUT_PATTERNS = Arrays.asList(
-        // YYYY-MM-dd followed by a space
-        Pattern.compile("\\b\\d{4}-\\d{2}-\\d{2} "),
-        // The end of some number (likely year or day) followed by a space then HH:mm
-        Pattern.compile("\\d \\d{2}:\\d{2}\\b"),
-        // HH:mm:ss surrounded by spaces
-        Pattern.compile(" \\d{2}:\\d{2}:\\d{2} "),
-        // Literal 'T' surrounded by numbers
-        Pattern.compile("\\dT\\d")
-    );
+    private static final char INDETERMINATE_FIELD_PLACEHOLDER = '?';
+    // The ? characters in this must match INDETERMINATE_FIELD_PLACEHOLDER
+    // above, but they're literals in this regex to aid readability
+    private static final Pattern INDETERMINATE_FORMAT_INTERPRETER = Pattern.compile("([^?]*)(\\?{1,2})(?:([^?]*)(\\?{1,2})([^?]*))?");
+
+    /**
+     * These are the date format letter groups that are supported in custom formats.
+     *
+     * (Note: Fractional seconds are a special case, as they have to follow seconds.)
+ */ + private static final Map> VALID_LETTER_GROUPS; + static { + Map> validLetterGroups = new HashMap<>(); + validLetterGroups.put("yyyy", new Tuple<>("%{YEAR}", "\\d{4}")); + validLetterGroups.put("yy", new Tuple<>("%{YEAR}", "\\d{2}")); + validLetterGroups.put("M", new Tuple<>("%{MONTHNUM}", "\\d{1,2}")); + validLetterGroups.put("MM", new Tuple<>("%{MONTHNUM2}", "\\d{2}")); + // The simple regex here is based on the fact that the %{MONTH} Grok pattern only matches English and German month names + validLetterGroups.put("MMM", new Tuple<>("%{MONTH}", "[A-Z]\\S{2}")); + validLetterGroups.put("MMMM", new Tuple<>("%{MONTH}", "[A-Z]\\S{2,8}")); + validLetterGroups.put("d", new Tuple<>("%{MONTHDAY}", "\\d{1,2}")); + validLetterGroups.put("dd", new Tuple<>("%{MONTHDAY}", "\\d{2}")); + // The simple regex here is based on the fact that the %{DAY} Grok pattern only matches English and German day names + validLetterGroups.put("EEE", new Tuple<>("%{DAY}", "[A-Z]\\S{2}")); + validLetterGroups.put("EEEE", new Tuple<>("%{DAY}", "[A-Z]\\S{2,8}")); + validLetterGroups.put("H", new Tuple<>("%{HOUR}", "\\d{1,2}")); + validLetterGroups.put("HH", new Tuple<>("%{HOUR}", "\\d{2}")); + validLetterGroups.put("h", new Tuple<>("%{HOUR}", "\\d{1,2}")); + validLetterGroups.put("mm", new Tuple<>("%{MINUTE}", "\\d{2}")); + validLetterGroups.put("ss", new Tuple<>("%{SECOND}", "\\d{2}")); + validLetterGroups.put("a", new Tuple<>("(?:AM|PM)", "[AP]M")); + validLetterGroups.put("XX", new Tuple<>("%{ISO8601_TIMEZONE}", "(?:Z|[+-]\\d{4})")); + validLetterGroups.put("XXX", new Tuple<>("%{ISO8601_TIMEZONE}", "(?:Z|[+-]\\d{2}:\\d{2})")); + validLetterGroups.put("zzz", new Tuple<>("%{TZ}", "[A-Z]{3}")); + VALID_LETTER_GROUPS = Collections.unmodifiableMap(validLetterGroups); + } + + static final String CUSTOM_TIMESTAMP_GROK_NAME = "CUSTOM_TIMESTAMP"; + + /** + * Candidates for the special format strings (ISO8601, UNIX_MS, UNIX and TAI64N) + */ + static final CandidateTimestampFormat ISO8601_CANDIDATE_FORMAT = + new CandidateTimestampFormat(CandidateTimestampFormat::iso8601FormatFromExample, + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "\\b%{TIMESTAMP_ISO8601}\\b", "TIMESTAMP_ISO8601", + "1111 11 11 11 11", 0, 19); + static final CandidateTimestampFormat UNIX_MS_CANDIDATE_FORMAT = + new CandidateTimestampFormat(example -> Collections.singletonList("UNIX_MS"), "\\b\\d{13}\\b", "\\b\\d{13}\\b", "POSINT", + "1111111111111", 0, 0); + static final CandidateTimestampFormat UNIX_CANDIDATE_FORMAT = + new CandidateTimestampFormat(example -> Collections.singletonList("UNIX"), "\\b\\d{10}\\b", "\\b\\d{10}(?:\\.\\d{3,9})?\\b", + "NUMBER", "1111111111", 0, 10); + static final CandidateTimestampFormat TAI64N_CANDIDATE_FORMAT = + new CandidateTimestampFormat(example -> Collections.singletonList("TAI64N"), "\\b[0-9A-Fa-f]{24}\\b", "\\b[0-9A-Fa-f]{24}\\b", + "BASE16NUM"); /** * The first match in this list will be chosen, so it needs to be ordered @@ -64,427 +114,1210 @@ public final class TimestampFormatFinder { // The TOMCAT_DATESTAMP format has to come before ISO8601 because it's basically ISO8601 but // with a space before the timezone, and because the timezone is optional in ISO8601 it will // be recognised as that with the timezone missed off if ISO8601 is checked first - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSS Z", "yyyy-MM-dd HH:mm:ss,SSS XX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", + new CandidateTimestampFormat(example -> CandidateTimestampFormat.iso8601LikeFormatFromExample(example, " ", " "), + 
"\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", "\\b20\\d{2}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9} (?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "TOMCAT_DATESTAMP", Arrays.asList(0, 1)), - // The Elasticsearch ISO8601 parser requires a literal T between the date and time, so - // longhand formats are needed if there's a space instead - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSSZ", "yyyy-MM-dd HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}(?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "TIMESTAMP_ISO8601", Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSSZZ", "yyyy-MM-dd HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}[+-]%{HOUR}:%{MINUTE}\\b", - "TIMESTAMP_ISO8601", Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss,SSS", "yyyy-MM-dd HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ssZ", "yyyy-MM-dd HH:mm:ssXX", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)(?:Z|[+-]%{HOUR}%{MINUTE})\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ssZZ", "yyyy-MM-dd HH:mm:ssXXX", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[+-]%{HOUR}:%{MINUTE}\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - new CandidateTimestampFormat("YYYY-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)\\b", "TIMESTAMP_ISO8601", - Arrays.asList(0, 1)), - // When using Java time the Elasticsearch ISO8601 parser for fractional time requires that the fractional - // separator match the current JVM locale, which is too restrictive for arbitrary log file parsing - new CandidateTimestampFormat("ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY}T%{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}(?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "TIMESTAMP_ISO8601", Collections.singletonList(3)), - new CandidateTimestampFormat("ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY}T%{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}[+-]%{HOUR}:%{MINUTE}\\b", - "TIMESTAMP_ISO8601", Collections.singletonList(3)), - new CandidateTimestampFormat("ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", - "\\b%{YEAR}-%{MONTHNUM}-%{MONTHDAY}T%{HOUR}:?%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}\\b", "TIMESTAMP_ISO8601", - Collections.singletonList(3)), - new CandidateTimestampFormat("ISO8601", "ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", "\\b%{TIMESTAMP_ISO8601}\\b", - "TIMESTAMP_ISO8601", Collections.singletonList(3)), - new CandidateTimestampFormat("EEE MMM dd YYYY HH:mm:ss zzz", "EEE MMM dd yyyy HH:mm:ss zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2} ", - "\\b%{DAY} %{MONTH} 
%{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{TZ}\\b", "DATESTAMP_RFC822", Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE MMM dd YYYY HH:mm zzz", "EEE MMM dd yyyy HH:mm zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2} ", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE} %{TZ}\\b", "DATESTAMP_RFC822", Collections.singletonList(1)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm:ss ZZ", "EEE, dd MMM yyyy HH:mm:ss XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:Z|[+-]%{HOUR}:%{MINUTE})\\b", - "DATESTAMP_RFC2822", Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm:ss Z", "EEE, dd MMM yyyy HH:mm:ss XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:Z|[+-]%{HOUR}%{MINUTE})\\b", - "DATESTAMP_RFC2822", Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm ZZ", "EEE, dd MMM yyyy HH:mm XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE} (?:Z|[+-]%{HOUR}:%{MINUTE})\\b", "DATESTAMP_RFC2822", - Collections.singletonList(1)), - new CandidateTimestampFormat("EEE, dd MMM YYYY HH:mm Z", "EEE, dd MMM yyyy HH:mm XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", - "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE} (?:Z|[+-]%{HOUR}%{MINUTE})\\b", "DATESTAMP_RFC2822", - Collections.singletonList(1)), - new CandidateTimestampFormat("EEE MMM dd HH:mm:ss zzz YYYY", "EEE MMM dd HH:mm:ss zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{TZ} %{YEAR}\\b", "DATESTAMP_OTHER", - Arrays.asList(1, 2)), - new CandidateTimestampFormat("EEE MMM dd HH:mm zzz YYYY", "EEE MMM dd HH:mm zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE} %{TZ} %{YEAR}\\b", "DATESTAMP_OTHER", Collections.singletonList(1)), - new CandidateTimestampFormat("YYYYMMddHHmmss", "yyyyMMddHHmmss", "\\b\\d{14}\\b", + "TOMCAT_DATESTAMP", "1111 11 11 11 11 11 111", 0, 13), + ISO8601_CANDIDATE_FORMAT, + new CandidateTimestampFormat( + example -> Arrays.asList("EEE MMM dd yy HH:mm:ss zzz", "EEE MMM d yy HH:mm:ss zzz"), + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{1,2} \\d{2} \\d{2}:\\d{2}:\\d{2}\\b", + "\\b%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}(?::(?:[0-5][0-9]|60)) %{TZ}\\b", "DATESTAMP_RFC822", + Arrays.asList(" 11 11 11 11 11", " 1 11 11 11 11"), 0, 5), + new CandidateTimestampFormat( + example -> CandidateTimestampFormat.adjustTrailingTimezoneFromExample(example, "EEE, dd MMM yyyy HH:mm:ss XX"), + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + "\\b%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{HOUR}:%{MINUTE}(?::(?:[0-5][0-9]|60)) (?:Z|[+-]%{HOUR}:?%{MINUTE})\\b", + "DATESTAMP_RFC2822", Arrays.asList(" 11 1111 11 11 11", " 1 1111 11 11 11"), 0, 7), + new CandidateTimestampFormat( + example -> Arrays.asList("EEE MMM dd HH:mm:ss zzz yyyy", "EEE MMM d HH:mm:ss zzz yyyy"), + "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}(?::(?:[0-5][0-9]|60)) %{TZ} %{YEAR}\\b", "DATESTAMP_OTHER", + Arrays.asList(" 11 11 11 11", " 1 11 11 
11"), 12, 10), + new CandidateTimestampFormat(example -> Collections.singletonList("yyyyMMddHHmmss"), "\\b\\d{14}\\b", "\\b20\\d{2}%{MONTHNUM2}(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01]))(?:2[0123]|[01][0-9])%{MINUTE}(?:[0-5][0-9]|60)\\b", - "DATESTAMP_EVENTLOG"), - new CandidateTimestampFormat("EEE MMM dd HH:mm:ss YYYY", "EEE MMM dd HH:mm:ss yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", - "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{YEAR}\\b", "HTTPDERROR_DATE", Arrays.asList(1, 2)), - new CandidateTimestampFormat(Arrays.asList("MMM dd HH:mm:ss,SSS", "MMM d HH:mm:ss,SSS"), - Arrays.asList("MMM dd HH:mm:ss,SSS", "MMM d HH:mm:ss,SSS"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2},\\d{3}", - "%{MONTH} +%{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)[:.,][0-9]{3,9}\\b", "SYSLOGTIMESTAMP", - Collections.singletonList(1)), - new CandidateTimestampFormat(Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "%{MONTH} +%{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)\\b", - "SYSLOGTIMESTAMP", Collections.singletonList(1)), - new CandidateTimestampFormat("dd/MMM/YYYY:HH:mm:ss Z", "dd/MMM/yyyy:HH:mm:ss XX", + "DATESTAMP_EVENTLOG", "11111111111111", 0, 0), + new CandidateTimestampFormat(example -> Collections.singletonList("EEE MMM dd HH:mm:ss yyyy"), + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", + "\\b%{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) %{YEAR}\\b", "HTTPDERROR_DATE", + " 11 11 11 11 1111", 0, 0), + new CandidateTimestampFormat( + example -> CandidateTimestampFormat.expandDayAndAdjustFractionalSecondsFromExample(example, "MMM dd HH:mm:ss"), + "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + "%{MONTH} +%{MONTHDAY} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)(?:[:.,][0-9]{3,9})?\\b", "SYSLOGTIMESTAMP", + Arrays.asList(" 11 11 11 11", " 1 11 11 11"), 4, 10), + new CandidateTimestampFormat(example -> Collections.singletonList("dd/MMM/yyyy:HH:mm:ss XX"), "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", - "\\b%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) [+-]?%{HOUR}%{MINUTE}\\b", "HTTPDATE"), - new CandidateTimestampFormat("MMM dd, YYYY h:mm:ss a", "MMM dd, yyyy h:mm:ss a", - "\\b[A-Z]\\S{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", - "%{MONTH} %{MONTHDAY}, 20\\d{2} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:AM|PM)\\b", "CATALINA_DATESTAMP"), - new CandidateTimestampFormat(Arrays.asList("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss"), - Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", - "%{MONTH} +%{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)\\b", "CISCOTIMESTAMP", Collections.singletonList(1)), - new CandidateTimestampFormat("UNIX_MS", "UNIX_MS", "\\b\\d{13}\\b", "\\b\\d{13}\\b", "POSINT"), - new CandidateTimestampFormat("UNIX", "UNIX", "\\b\\d{10}\\.\\d{3,9}\\b", "\\b\\d{10}\\.(?:\\d{3}){1,3}\\b", "NUMBER"), - new CandidateTimestampFormat("UNIX", "UNIX", "\\b\\d{10}\\b", "\\b\\d{10}\\b", "POSINT"), - new CandidateTimestampFormat("TAI64N", "TAI64N", "\\b[0-9A-Fa-f]{24}\\b", "\\b[0-9A-Fa-f]{24}\\b", "BASE16NUM") + "\\b%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) [+-]?%{HOUR}%{MINUTE}\\b", "HTTPDATE", + "11 1111 11 11 11", 0, 6), + new CandidateTimestampFormat(example -> Collections.singletonList("MMM dd, yyyy h:mm:ss a"), + "\\b[A-Z]\\S{2} 
\\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", + "%{MONTH} %{MONTHDAY}, 20\\d{2} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60) (?:AM|PM)\\b", "CATALINA_DATESTAMP", + Arrays.asList(" 11 1111 1 11 11", " 11 1111 11 11 11"), 0, 3), + new CandidateTimestampFormat(example -> Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), + "\\b[A-Z]\\S{2} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + "%{MONTH} +%{MONTHDAY} %{YEAR} %{HOUR}:%{MINUTE}:(?:[0-5][0-9]|60)\\b", "CISCOTIMESTAMP", + Arrays.asList(" 11 1111 11 11 11", " 1 1111 11 11 11"), 0, 0), + new CandidateTimestampFormat(CandidateTimestampFormat::indeterminateDayMonthFormatFromExample, + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "\\b%{DATESTAMP}\\b", "DATESTAMP", + // In DATESTAMP the month may be 1 or 2 digits, but the day must be 2 + Arrays.asList("11 11 1111 11 11 11", "1 11 1111 11 11 11", "11 1 1111 11 11 11"), 0, 10), + new CandidateTimestampFormat(CandidateTimestampFormat::indeterminateDayMonthFormatFromExample, + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "\\b%{DATE}\\b", "DATE", + // In DATE the month may be 1 or 2 digits, but the day must be 2 + Arrays.asList("11 11 1111", "11 1 1111", "1 11 1111"), 0, 0), + UNIX_MS_CANDIDATE_FORMAT, + UNIX_CANDIDATE_FORMAT, + TAI64N_CANDIDATE_FORMAT, + // This one is an ISO8601 date with no time, but the TIMESTAMP_ISO8601 Grok pattern doesn't cover it + new CandidateTimestampFormat(example -> Collections.singletonList("ISO8601"), + "\\b\\d{4}-\\d{2}-\\d{2}\\b", "\\b%{YEAR}-%{MONTHNUM2}-%{MONTHDAY}\\b", CUSTOM_TIMESTAMP_GROK_NAME, + "1111 11 11", 0, 0) ); - private TimestampFormatFinder() { + /** + * It is expected that the explanation will be shared with other code. + * Both this class and other classes will update it. + */ + private final List explanation; + private final boolean requireFullMatch; + private final boolean errorOnNoTimestamp; + private final boolean errorOnMultiplePatterns; + private final List orderedCandidateFormats; + private final TimeoutChecker timeoutChecker; + private final List matches; + // These two are not volatile because the class is explicitly not for use from multiple threads. + // But if it ever were to be made thread safe, making these volatile would be one required step. + private List matchedFormats; + private List cachedJavaTimestampFormats; + + /** + * Construct without any specific timestamp format override. + * @param explanation List of reasons for making decisions. May contain items when passed and new reasons + * can be appended by the methods of this class. + * @param requireFullMatch Must samples added to this object represent a timestamp in their entirety? + * @param errorOnNoTimestamp Should an exception be thrown if a sample is added that does not contain a recognised timestamp? + * @param errorOnMultiplePatterns Should an exception be thrown if samples are uploaded that require different Grok patterns? + * @param timeoutChecker Will abort the operation if its timeout is exceeded. + */ + public TimestampFormatFinder(List explanation, boolean requireFullMatch, boolean errorOnNoTimestamp, + boolean errorOnMultiplePatterns, TimeoutChecker timeoutChecker) { + this(explanation, null, requireFullMatch, errorOnNoTimestamp, errorOnMultiplePatterns, timeoutChecker); } /** - * Find the first timestamp format that matches part of the supplied value. - * @param text The value that the returned timestamp format must exist within. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. 
- * @return The timestamp format, or null if none matches. + * Construct with a timestamp format override. + * @param explanation List of reasons for making decisions. May contain items when passed and new reasons + * can be appended by the methods of this class. + * @param overrideFormat A timestamp format that will take precedence when looking for timestamps. If null + * then the effect is to have no such override, i.e. equivalent to calling the other constructor. + * Timestamps will also be matched that have slightly different formats, but match the same Grok + * pattern as is implied by the override format. + * @param requireFullMatch Must samples added to this object represent a timestamp in their entirety? + * @param errorOnNoTimestamp Should an exception be thrown if a sample is added that does not contain a recognised timestamp? + * @param errorOnMultiplePatterns Should an exception be thrown if samples are uploaded that require different Grok patterns? + * @param timeoutChecker Will abort the operation if its timeout is exceeded. */ - public static TimestampMatch findFirstMatch(String text, TimeoutChecker timeoutChecker) { - return findFirstMatch(text, 0, timeoutChecker); + public TimestampFormatFinder(List explanation, @Nullable String overrideFormat, boolean requireFullMatch, + boolean errorOnNoTimestamp, boolean errorOnMultiplePatterns, TimeoutChecker timeoutChecker) { + this.explanation = Objects.requireNonNull(explanation); + this.requireFullMatch = requireFullMatch; + this.errorOnNoTimestamp = errorOnNoTimestamp; + this.errorOnMultiplePatterns = errorOnMultiplePatterns; + this.orderedCandidateFormats = (overrideFormat != null) + ? Collections.singletonList(makeCandidateFromOverrideFormat(overrideFormat, timeoutChecker)) + : ORDERED_CANDIDATE_FORMATS; + this.timeoutChecker = Objects.requireNonNull(timeoutChecker); + this.matches = new ArrayList<>(); + this.matchedFormats = new ArrayList<>(); } /** - * Find the first timestamp format that matches part of the supplied value. - * @param text The value that the returned timestamp format must exist within. - * @param requiredFormat A timestamp format that any returned match must support. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Convert a user supplied Java timestamp format to a Grok pattern and simple regular expression. + * @param overrideFormat A user supplied Java timestamp format. + * @return A tuple where the first value is a Grok pattern and the second is a simple regex. 
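As an illustration of the conversion implemented just below, an override such as "dd/MMM/yyyy:HH:mm:ss XX" is translated letter group by letter group via VALID_LETTER_GROUPS; the outputs shown in the comments are indicative rather than taken from the test suite:

    Tuple<String, String> grokAndRegex =
        TimestampFormatFinder.overrideFormatToGrokAndRegex("dd/MMM/yyyy:HH:mm:ss XX");
    // v1() -> "%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE}"
    // v2() -> "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} (?:Z|[+-]\\d{4})\\b"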
     */
-    public static TimestampMatch findFirstMatch(String text, String requiredFormat, TimeoutChecker timeoutChecker) {
-        return findFirstMatch(text, 0, requiredFormat, timeoutChecker);
+    static Tuple<String, String> overrideFormatToGrokAndRegex(String overrideFormat) {
+
+        if (overrideFormat.indexOf('\n') >= 0 || overrideFormat.indexOf('\r') >= 0) {
+            throw new IllegalArgumentException("Multi-line timestamp formats [" + overrideFormat + "] not supported");
+        }
+
+        if (overrideFormat.indexOf(INDETERMINATE_FIELD_PLACEHOLDER) >= 0) {
+            throw new IllegalArgumentException("Timestamp format [" + overrideFormat + "] not supported because it contains [" +
+                INDETERMINATE_FIELD_PLACEHOLDER + "]");
+        }
+
+        StringBuilder grokPatternBuilder = new StringBuilder();
+        StringBuilder regexBuilder = new StringBuilder();
+
+        boolean notQuoted = true;
+        char prevChar = '\0';
+        String prevLetterGroup = null;
+        int pos = 0;
+        while (pos < overrideFormat.length()) {
+            char curChar = overrideFormat.charAt(pos);
+
+            if (curChar == '\'') {
+                notQuoted = !notQuoted;
+            } else if (notQuoted && Character.isLetter(curChar)) {
+                int startPos = pos;
+                int endPos = startPos + 1;
+                while (endPos < overrideFormat.length() && overrideFormat.charAt(endPos) == curChar) {
+                    ++endPos;
+                    ++pos;
+                }
+                String letterGroup = overrideFormat.substring(startPos, endPos);
+                Tuple<String, String> grokPatternAndRegexForGroup = VALID_LETTER_GROUPS.get(letterGroup);
+                if (grokPatternAndRegexForGroup == null) {
+                    // Special case of fractional seconds
+                    if (curChar != 'S' || FRACTIONAL_SECOND_SEPARATORS.indexOf(prevChar) == -1 ||
+                        "ss".equals(prevLetterGroup) == false || endPos - startPos > 9) {
+                        String msg = "Letter group [" + letterGroup + "] in [" + overrideFormat + "] is not supported";
+                        if (curChar == 'S') {
+                            msg += " because it is not preceded by [ss] and a separator from [" + FRACTIONAL_SECOND_SEPARATORS + "]";
+                        }
+                        throw new IllegalArgumentException(msg);
+                    }
+                    // No need to append to the Grok pattern as %{SECOND} already allows for an optional
+                    // fraction, but we need to remove the separator that's included in %{SECOND}
+                    grokPatternBuilder.deleteCharAt(grokPatternBuilder.length() - 1);
+                    regexBuilder.append("\\d{").append(endPos - startPos).append('}');
+                } else {
+                    grokPatternBuilder.append(grokPatternAndRegexForGroup.v1());
+                    if (regexBuilder.length() == 0) {
+                        regexBuilder.append("\\b");
+                    }
+                    regexBuilder.append(grokPatternAndRegexForGroup.v2());
+                }
+                if (pos + 1 == overrideFormat.length()) {
+                    regexBuilder.append("\\b");
+                }
+                prevLetterGroup = letterGroup;
+            } else {
+                if (PUNCTUATION_THAT_NEEDS_ESCAPING_IN_REGEX.indexOf(curChar) >= 0) {
+                    grokPatternBuilder.append('\\');
+                    regexBuilder.append('\\');
+                }
+                grokPatternBuilder.append(curChar);
+                regexBuilder.append(curChar);
+            }
+
+            prevChar = curChar;
+            ++pos;
+        }
+
+        if (prevLetterGroup == null) {
+            throw new IllegalArgumentException("No time format letter groups in override format [" + overrideFormat + "]");
+        }
+
+        return new Tuple<>(grokPatternBuilder.toString(), regexBuilder.toString());
     }
 
     /**
-     * Find the first timestamp format that matches part of the supplied value,
-     * excluding a specified number of candidate formats.
-     * @param text The value that the returned timestamp format must exist within.
-     * @param ignoreCandidates The number of candidate formats to exclude from the search.
+     * Given a user supplied Java timestamp format, return an appropriate candidate timestamp object as required by this class.
+ * The returned candidate might be a built-in one, or might be generated from the supplied format. + * @param overrideFormat A user supplied Java timestamp format. * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * @return An appropriate candidate timestamp object. */ - public static TimestampMatch findFirstMatch(String text, int ignoreCandidates, TimeoutChecker timeoutChecker) { - return findFirstMatch(text, ignoreCandidates, null, timeoutChecker); + static CandidateTimestampFormat makeCandidateFromOverrideFormat(String overrideFormat, TimeoutChecker timeoutChecker) { + + // First check for a special format string + switch (overrideFormat.toUpperCase(Locale.ROOT)) { + case "ISO8601": + return ISO8601_CANDIDATE_FORMAT; + case "UNIX_MS": + return UNIX_MS_CANDIDATE_FORMAT; + case "UNIX": + return UNIX_CANDIDATE_FORMAT; + case "TAI64N": + return TAI64N_CANDIDATE_FORMAT; + } + + // Next check for a built-in candidate that incorporates the override, and prefer this + + // If the override is not a valid format then one or other of these two calls will + // throw, and that is how we'll report the invalid format to the user + Tuple grokPatternAndRegex = overrideFormatToGrokAndRegex(overrideFormat); + DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern(overrideFormat, Locale.ROOT); + + // This timestamp (2001-02-03T04:05:06,123456789+0545) is chosen such that the month, day and hour all have just 1 digit. + // This means that it will distinguish between formats that do/don't output leading zeroes for month, day and hour. + // Additionally it has the full 9 digits of fractional second precision, to avoid the possibility of truncating the fraction. + String generatedTimestamp = javaTimeFormatter.withZone(ZoneOffset.ofHoursMinutesSeconds(5, 45, 0)) + .format(Instant.ofEpochMilli(981173106123L).plusNanos(456789L)); + for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS) { + + TimestampMatch match = checkCandidate(candidate, generatedTimestamp, null, true, timeoutChecker); + if (match != null) { + return new CandidateTimestampFormat(example -> { + + // Modify the built-in candidate so it prefers to return the user supplied format + // if at all possible, and only falls back to standard logic for other situations + try { + // TODO consider support for overriding the locale too + // But since Grok only supports English and German date words ingest + // via Grok will fall down at an earlier stage for other languages... + javaTimeFormatter.parse(example); + return Collections.singletonList(overrideFormat); + } catch (DateTimeException e) { + return candidate.javaTimestampFormatSupplier.apply(example); + } + }, candidate.simplePattern.pattern(), candidate.strictGrokPattern, candidate.outputGrokPatternName); + } + } + + // None of the out-of-the-box formats were close, so use the built Grok pattern and simple regex for the override + return new CandidateTimestampFormat(example -> Collections.singletonList(overrideFormat), + grokPatternAndRegex.v2(), grokPatternAndRegex.v1(), CUSTOM_TIMESTAMP_GROK_NAME); } /** - * Find the first timestamp format that matches part of the supplied value, - * excluding a specified number of candidate formats. - * @param text The value that the returned timestamp format must exist within. - * @param ignoreCandidates The number of candidate formats to exclude from the search. - * @param requiredFormat A timestamp format that any returned match must support. 
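The probe instant deserves a quick expansion (a verification aside, not part of the patch):

    Instant probe = Instant.ofEpochMilli(981173106123L).plusNanos(456789L);
    // probe.toString() -> "2001-02-03T04:05:06.123456789Z"
    // Note that the parenthetical in the comment above attaches the +0545 suffix to the
    // UTC reading; rendered in the +05:45 zone the wall clock is actually
    // 2001-02-03T09:50:06.123456789+05:45, where month (2), day (3) and hour (9)
    // are still all single digits, and the fraction keeps its full 9 digits.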
- * @param timeoutChecker Will abort the operation if its timeout is exceeded. + * Find the first timestamp format that matches part or all of the supplied text. + * @param candidate The timestamp candidate to consider. + * @param text The text that the returned timestamp format must exist within. + * @param numberPosBitSet If not null, each bit must be set to true if and only if the + * corresponding position in {@code text} is a digit. + * @param requireFullMatch Does the candidate have to match the entire text? + * @param timeoutChecker Will abort the operation if its timeout is exceeded. * @return The timestamp format, or null if none matches. */ - public static TimestampMatch findFirstMatch(String text, int ignoreCandidates, String requiredFormat, TimeoutChecker timeoutChecker) { - if (ignoreCandidates >= ORDERED_CANDIDATE_FORMATS.size()) { - return null; - } - Boolean[] quickRuleoutMatches = new Boolean[QUICK_RULE_OUT_PATTERNS.size()]; - int index = ignoreCandidates; - String adjustedRequiredFormat = adjustRequiredFormat(requiredFormat); - for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS.subList(ignoreCandidates, ORDERED_CANDIDATE_FORMATS.size())) { - if (adjustedRequiredFormat == null || candidate.jodaTimestampFormats.contains(adjustedRequiredFormat) || - candidate.javaTimestampFormats.contains(adjustedRequiredFormat)) { - boolean quicklyRuledOut = false; - for (Integer quickRuleOutIndex : candidate.quickRuleOutIndices) { - if (quickRuleoutMatches[quickRuleOutIndex] == null) { - quickRuleoutMatches[quickRuleOutIndex] = QUICK_RULE_OUT_PATTERNS.get(quickRuleOutIndex).matcher(text).find(); + private static TimestampMatch checkCandidate(CandidateTimestampFormat candidate, String text, @Nullable BitSet numberPosBitSet, + boolean requireFullMatch, TimeoutChecker timeoutChecker) { + if (requireFullMatch) { + Map captures = timeoutChecker.grokCaptures(candidate.strictFullMatchGrok, text, + "timestamp format determination"); + if (captures != null) { + return new TimestampMatch(candidate, "", text, ""); + } + } else { + // Since a search in a long string that has sections that nearly match will be very slow, it's + // worth doing an initial sanity check to see if the relative positions of digits necessary to + // get a match exist first + Tuple boundsForCandidate = findBoundsForCandidate(candidate, numberPosBitSet); + if (boundsForCandidate.v1() >= 0) { + assert boundsForCandidate.v2() > boundsForCandidate.v1(); + String matchIn = text.substring(boundsForCandidate.v1(), Math.min(boundsForCandidate.v2(), text.length())); + Map captures = timeoutChecker.grokCaptures(candidate.strictSearchGrok, matchIn, + "timestamp format determination"); + if (captures != null) { + StringBuilder prefaceBuilder = new StringBuilder(); + if (boundsForCandidate.v1() > 0) { + prefaceBuilder.append(text.subSequence(0, boundsForCandidate.v1())); + } + prefaceBuilder.append(captures.getOrDefault(PREFACE, "")); + StringBuilder epilogueBuilder = new StringBuilder(); + epilogueBuilder.append(captures.getOrDefault(EPILOGUE, "")); + if (boundsForCandidate.v2() < text.length()) { + epilogueBuilder.append(text.subSequence(boundsForCandidate.v2(), text.length())); } - if (quickRuleoutMatches[quickRuleOutIndex] == false) { - quicklyRuledOut = true; + return new TimestampMatch(candidate, prefaceBuilder.toString(), text.substring(prefaceBuilder.length(), + text.length() - epilogueBuilder.length()), epilogueBuilder.toString()); + } + } else { + timeoutChecker.check("timestamp format determination"); + } + } + 
+ return null; + } + + /** + * Add a sample value to be considered by the format finder. If {@code requireFullMatch} was set to + * true on construction then the entire sample will be tested to see if it is a timestamp, + * otherwise a timestamp may be detected as just a portion of the sample. An exception will be thrown + * if {@code errorOnNoTimestamp} was set to true on construction, and no timestamp is + * found. An exception will also be thrown if {@code errorOnMultiplePatterns} was set to true + * on construction and a new timestamp format is detected that cannot be merged with a previously detected + * format. + * @param text The sample in which to detect a timestamp. + */ + public void addSample(String text) { + + BitSet numberPosBitSet = requireFullMatch ? null : stringToNumberPosBitSet(text); + + for (CandidateTimestampFormat candidate : orderedCandidateFormats) { + + TimestampMatch match = checkCandidate(candidate, text, numberPosBitSet, requireFullMatch, timeoutChecker); + if (match != null) { + TimestampFormat newFormat = match.timestampFormat; + boolean mustAdd = true; + for (int i = 0; i < matchedFormats.size(); ++i) { + TimestampFormat existingFormat = matchedFormats.get(i); + if (existingFormat.canMergeWith(newFormat)) { + matchedFormats.set(i, existingFormat.mergeWith(newFormat)); + mustAdd = false; + // Sharing formats considerably reduces the memory usage during the analysis + // when there are many samples, so reconstruct the match with a shared format + match = new TimestampMatch(match, matchedFormats.get(i)); break; } } - if (quicklyRuledOut == false) { - Map captures = timeoutChecker.grokCaptures(candidate.strictSearchGrok, text, - "timestamp format determination"); - if (captures != null) { - String preface = captures.getOrDefault(PREFACE, "").toString(); - String epilogue = captures.getOrDefault(EPILOGUE, "").toString(); - return makeTimestampMatch(candidate, index, preface, text.substring(preface.length(), - text.length() - epilogue.length()), epilogue); + if (mustAdd) { + if (errorOnMultiplePatterns && matchedFormats.isEmpty() == false) { + throw new IllegalArgumentException("Multiple timestamp formats found [" + + matchedFormats.get(0) + "] and [" + newFormat + "]"); } + matchedFormats.add(newFormat); } + + matches.add(match); + cachedJavaTimestampFormats = null; + return; } - ++index; } - return null; + + if (errorOnNoTimestamp) { + throw new IllegalArgumentException("No timestamp found in [" + text + "]"); + } } /** - * Find the best timestamp format for matching an entire field value. - * @param text The value that the returned timestamp format must match in its entirety. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Where multiple timestamp formats have been found, select the "best" one, whose details + * will then be returned by methods such as {@link #getGrokPatternName} and + * {@link #getJavaTimestampFormats}. If fewer than two timestamp formats have been found + * then this method does nothing. 
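Putting addSample and selectBestMatch together, the intended calling sequence looks roughly like this (finder and sampleLines are hypothetical names):

    for (String line : sampleLines) {
        finder.addSample(line); // may throw, depending on the error flags chosen at construction
    }
    finder.selectBestMatch();   // a no-op when zero or one format was matched
    String grokPatternName = finder.getGrokPatternName();
    List<String> javaFormats = finder.getJavaTimestampFormats();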
*/
- public static TimestampMatch findFirstFullMatch(String text, TimeoutChecker timeoutChecker) {
- return findFirstFullMatch(text, 0, timeoutChecker);
+ public void selectBestMatch() {
+
+ if (matchedFormats.size() < 2) {
+ // Nothing to do
+ return;
+ }
+
+ double[] weights = calculateMatchWeights();
+ timeoutChecker.check("timestamp format determination");
+ int highestWeightFormatIndex = findHighestWeightIndex(weights);
+ timeoutChecker.check("timestamp format determination");
+ selectHighestWeightFormat(highestWeightFormatIndex);
}
/**
- * Find the best timestamp format for matching an entire field value.
- * @param text The value that the returned timestamp format must match in its entirety.
- * @param requiredFormat A timestamp format that any returned match must support.
- * @param timeoutChecker Will abort the operation if its timeout is exceeded.
- * @return The timestamp format, or null if none matches.
+ * For each matched format, calculate a weight that can be used to decide which match is best. The
+ * weight for each matched format is the sum of the weights for all matches that have that format.
+ * @return An array of weights. There is one entry in the array for each entry in {@link #matchedFormats},
+ * in the same order as the entries in {@link #matchedFormats}.
*/
- public static TimestampMatch findFirstFullMatch(String text, String requiredFormat, TimeoutChecker timeoutChecker) {
- return findFirstFullMatch(text, 0, requiredFormat, timeoutChecker);
+ private double[] calculateMatchWeights() {
+
+ int remainingMatches = matches.size();
+ double[] weights = new double[matchedFormats.size()];
+ for (TimestampMatch match : matches) {
+
+ for (int matchedFormatIndex = 0; matchedFormatIndex < matchedFormats.size(); ++matchedFormatIndex) {
+ if (matchedFormats.get(matchedFormatIndex).canMergeWith(match.timestampFormat)) {
+ weights[matchedFormatIndex] += weightForMatch(match.preface);
+ break;
+ }
+ }
+
+ // The highest possible weight is 1, so if the difference between the two highest weights
+ // is more than the number of matches remaining then the leader cannot possibly be overtaken
+ if (findDifferenceBetweenTwoHighestWeights(weights) > --remainingMatches) {
+ break;
+ }
+ }
+
+ return weights;
}
/**
- * Find the best timestamp format for matching an entire field value,
- * excluding a specified number of candidate formats.
- * @param text The value that the returned timestamp format must match in its entirety.
- * @param ignoreCandidates The number of candidate formats to exclude from the search.
- * @param timeoutChecker Will abort the operation if its timeout is exceeded.
- * @return The timestamp format, or null if none matches.
+ * Used to weight a timestamp match according to how far along the line it is found.
+ * Timestamps at the very beginning of the line are given a weight of 1. The weight
+ * progressively decreases the more text there is preceding the timestamp match, but
+ * is always greater than 0.
+ * @return A weight in the range (0, 1].
*/
- public static TimestampMatch findFirstFullMatch(String text, int ignoreCandidates, TimeoutChecker timeoutChecker) {
- return findFirstFullMatch(text, ignoreCandidates, null, timeoutChecker);
+ private static double weightForMatch(String preface) {
+ return Math.pow(1.0 + preface.length() / 15.0, -1.1);
}
/**
- * Find the best timestamp format for matching an entire field value,
- * excluding a specified number of candidate formats.
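A few sample values make the decay of weightForMatch concrete (rounded):

    // preface length  0: (1 + 0)^-1.1 = 1.0
    // preface length 15: (1 + 1)^-1.1 ~ 0.47
    // preface length 45: (1 + 3)^-1.1 ~ 0.22

So a timestamp at the very start of a line counts roughly twice as much as one 15 characters in.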
- * @param text The value that the returned timestamp format must match in its entirety. - * @param ignoreCandidates The number of candidate formats to exclude from the search. - * @param requiredFormat A timestamp format that any returned match must support. - * @param timeoutChecker Will abort the operation if its timeout is exceeded. - * @return The timestamp format, or null if none matches. + * Given an array of weights, find the difference between the two highest values. + * @param weights Array of weights. Must have at least two elements. + * @return The difference between the two highest values. */ - public static TimestampMatch findFirstFullMatch(String text, int ignoreCandidates, String requiredFormat, - TimeoutChecker timeoutChecker) { - if (ignoreCandidates >= ORDERED_CANDIDATE_FORMATS.size()) { + private static double findDifferenceBetweenTwoHighestWeights(double[] weights) { + assert weights.length >= 2; + + double highestWeight = 0.0; + double secondHighestWeight = 0.0; + for (double weight : weights) { + if (weight > highestWeight) { + secondHighestWeight = highestWeight; + highestWeight = weight; + } else if (weight > secondHighestWeight) { + secondHighestWeight = weight; + } + } + return highestWeight - secondHighestWeight; + } + + /** + * Given an array of weights, find the index with the highest weight. + * @param weights Array of weights. + * @return The index of the element with the highest weight. + */ + private static int findHighestWeightIndex(double[] weights) { + + double highestWeight = Double.NEGATIVE_INFINITY; + int highestWeightFormatIndex = -1; + for (int index = 0; index < weights.length; ++index) { + double weight = weights[index]; + if (weight > highestWeight) { + highestWeight = weight; + highestWeightFormatIndex = index; + } + } + + return highestWeightFormatIndex; + } + + /** + * Ensure the highest weight matched format is at the beginning of the list of matched formats. + * @param highestWeightFormatIndex The index of the matched format with the highest weight. + */ + private void selectHighestWeightFormat(int highestWeightFormatIndex) { + + assert highestWeightFormatIndex >= 0; + // If the selected format is already at the beginning of the list there's nothing to do + if (highestWeightFormatIndex == 0) { + return; + } + + cachedJavaTimestampFormats = null; + List newMatchedFormats = new ArrayList<>(matchedFormats); + // Swap the selected format with the one that's currently at the beginning of the list + newMatchedFormats.set(0, matchedFormats.get(highestWeightFormatIndex)); + newMatchedFormats.set(highestWeightFormatIndex, matchedFormats.get(0)); + matchedFormats = newMatchedFormats; + } + + /** + * How many different timestamp formats have been matched in the supplied samples? + * @return The number of different timestamp formats that have been matched in the supplied samples. + */ + public int getNumMatchedFormats() { + return matchedFormats.size(); + } + + /** + * Get the Grok pattern name that corresponds to the selected timestamp format. + * @return The Grok pattern name that corresponds to the selected timestamp format. 
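A worked example of the early-exit arithmetic (hypothetical weights):

    double[] weights = { 8.2, 3.1, 0.6 };
    // findDifferenceBetweenTwoHighestWeights(weights) == 5.1, i.e. 8.2 - 3.1.
    // Each remaining match adds at most 1.0 to a single weight, so once 5 or
    // fewer matches remain the trailing formats can no longer overtake the
    // leader and calculateMatchWeights() stops iterating.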
+ */ + public String getGrokPatternName() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; return null; } - int index = ignoreCandidates; - String adjustedRequiredFormat = adjustRequiredFormat(requiredFormat); - for (CandidateTimestampFormat candidate : ORDERED_CANDIDATE_FORMATS.subList(ignoreCandidates, ORDERED_CANDIDATE_FORMATS.size())) { - if (adjustedRequiredFormat == null || candidate.jodaTimestampFormats.contains(adjustedRequiredFormat) || - candidate.javaTimestampFormats.contains(adjustedRequiredFormat)) { - Map captures = timeoutChecker.grokCaptures(candidate.strictFullMatchGrok, text, - "timestamp format determination"); - if (captures != null) { - return makeTimestampMatch(candidate, index, "", text, ""); + return matchedFormats.get(0).grokPatternName; + } + + /** + * Get the custom Grok pattern definitions derived from the override format, if any. + * @return The custom Grok pattern definitions for the selected timestamp format. + * If there are none an empty map is returned. + */ + public Map getCustomGrokPatternDefinitions() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return Collections.emptyMap(); + } + return matchedFormats.get(0).customGrokPatternDefinitions; + } + + /** + * Of all the samples added that correspond to the selected format, return + * the portion of the sample that comes before the timestamp. + * @return A list of prefaces from samples that match the selected timestamp format. + */ + public List getPrefaces() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return Collections.emptyList(); + } + return matches.stream().filter(match -> matchedFormats.size() < 2 || matchedFormats.get(0).canMergeWith(match.timestampFormat)) + .map(match -> match.preface).collect(Collectors.toList()); + } + + /** + * Get the simple regular expression that can be used to identify timestamps + * of the selected format in almost any programming language. + * @return A {@link Pattern} that will match timestamps of the selected format. + */ + public Pattern getSimplePattern() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return null; + } + return matchedFormats.get(0).simplePattern; + } + + /** + * These are similar to Java timestamp formats but may contain indeterminate day/month + * placeholders if the order of day and month is uncertain. + * @return A list of Java timestamp formats possibly containing indeterminate day/month placeholders. + */ + public List getRawJavaTimestampFormats() { + if (matchedFormats.isEmpty()) { + // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake + assert errorOnNoTimestamp == false; + return Collections.emptyList(); + } + return matchedFormats.get(0).rawJavaTimestampFormats; + } + + /** + * These are used by ingest pipeline and index mappings. + * @return A list of Java timestamp formats to use for parsing documents. 
+ */ + public List getJavaTimestampFormats() { + if (cachedJavaTimestampFormats != null) { + return cachedJavaTimestampFormats; + } + return determiniseJavaTimestampFormats(getRawJavaTimestampFormats(), + // With multiple formats, only consider the matches that correspond to the first + // in the list (which is what we're returning information about via the getters). + // With just one format it's most efficient not to bother checking formats. + (matchedFormats.size() > 1) ? matchedFormats.get(0) : null); + } + + /** + * Given a list of timestamp formats that might contain indeterminate day/month parts, + * return the corresponding pattern with the placeholders replaced with concrete + * day/month formats. + */ + private List determiniseJavaTimestampFormats(List rawJavaTimestampFormats, + @Nullable TimestampFormat onlyConsiderFormat) { + + // This method needs rework if the class is ever made thread safe + + if (rawJavaTimestampFormats.stream().anyMatch(format -> format.indexOf(INDETERMINATE_FIELD_PLACEHOLDER) >= 0)) { + boolean isDayFirst = guessIsDayFirst(rawJavaTimestampFormats, onlyConsiderFormat, Locale.getDefault()); + cachedJavaTimestampFormats = rawJavaTimestampFormats.stream() + .map(format -> determiniseJavaTimestampFormat(format, isDayFirst)).collect(Collectors.toList()); + } else { + cachedJavaTimestampFormats = rawJavaTimestampFormats; + } + return cachedJavaTimestampFormats; + } + + /** + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first. + * @return true if the day comes first and false if the month comes first. + */ + private boolean guessIsDayFirst(List rawJavaTimestampFormats, @Nullable TimestampFormat onlyConsiderFormat, + Locale localeForFallback) { + + Boolean isDayFirst = guessIsDayFirstFromFormats(rawJavaTimestampFormats); + if (isDayFirst != null) { + return isDayFirst; + } + isDayFirst = guessIsDayFirstFromMatches(onlyConsiderFormat); + if (isDayFirst != null) { + return isDayFirst; + } + return guessIsDayFirstFromLocale(localeForFallback); + } + + /** + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first based on quirks of the + * built-in Grok patterns. + * @return true if the day comes first, false if the month comes first, and + * null if there is insufficient evidence to decide. 
+ */
+ Boolean guessIsDayFirstFromFormats(List rawJavaTimestampFormats) {
+
+ Boolean isDayFirst = null;
+
+ for (String rawJavaTimestampFormat : rawJavaTimestampFormats) {
+ Matcher matcher = INDETERMINATE_FORMAT_INTERPRETER.matcher(rawJavaTimestampFormat);
+ if (matcher.matches()) {
+ String firstNumber = matcher.group(2);
+ assert firstNumber != null;
+ String secondNumber = matcher.group(4);
+ if (secondNumber == null) {
+ return null;
+ }
+ if (firstNumber.length() == 2 && secondNumber.length() == 1) {
+ if (Boolean.FALSE.equals(isDayFirst)) {
+ // Inconsistency
+ return null;
+ }
+ isDayFirst = Boolean.TRUE;
}
+ if (firstNumber.length() == 1 && secondNumber.length() == 2) {
+ if (Boolean.TRUE.equals(isDayFirst)) {
+ // Inconsistency
+ return null;
+ }
+ isDayFirst = Boolean.FALSE;
+ }
+ }
+ }
+
+ if (isDayFirst != null) {
+ if (isDayFirst) {
+ explanation.add("Guessing day precedes month in timestamps as all detected formats have two digits in the first number "
+ + "and a single digit in the second number which is what the %{MONTHDAY} and %{MONTHNUM} Grok patterns permit");
+ } else {
+ explanation.add("Guessing month precedes day in timestamps as all detected formats have a single digit in the first number "
+ + "and two digits in the second number which is what the %{MONTHNUM} and %{MONTHDAY} Grok patterns permit");
}
- ++index;
}
+
+ return isDayFirst;
+ }
+
+ /**
+ * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy
+ * or MM/dd/yyyy for example), make a guess about whether the day comes first based on observed values
+ * of the first and second numbers.
+ * @return true if the day comes first, false if the month comes first, and
+ * null if there is insufficient evidence to decide.
+ */
+ Boolean guessIsDayFirstFromMatches(@Nullable TimestampFormat onlyConsiderFormat) {
+
+ BitSet firstIndeterminateNumbers = new BitSet();
+ BitSet secondIndeterminateNumbers = new BitSet();
+
+ for (TimestampMatch match : matches) {
+
+ if (onlyConsiderFormat == null || onlyConsiderFormat.canMergeWith(match.timestampFormat)) {
+
+ // Valid indeterminate day/month numbers will be in the range 1 to 31.
+ // -1 is used to mean "not present", and we ignore that here.
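Writing ? for INDETERMINATE_FIELD_PLACEHOLDER (the actual character is defined earlier in the file), the digit-width heuristic reads roughly:

    // "??/?/yyyy" - two placeholder chars then one: of the two Grok patterns only
    //               %{MONTHDAY} permits two digits there, so day is guessed first.
    // "?/??/yyyy" - one then two: month is guessed to come first.
    // Mixed evidence across formats, or a format with only one indeterminate
    // number, returns null and defers to the next heuristic.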
+ + if (match.firstIndeterminateDateNumber > 0) { + assert match.firstIndeterminateDateNumber <= 31; + if (match.firstIndeterminateDateNumber > 12) { + explanation.add("Guessing day precedes month in timestamps as one sample had first number [" + + match.firstIndeterminateDateNumber + "]"); + return Boolean.TRUE; + } + firstIndeterminateNumbers.set(match.firstIndeterminateDateNumber); + } + if (match.secondIndeterminateDateNumber > 0) { + assert match.secondIndeterminateDateNumber <= 31; + if (match.secondIndeterminateDateNumber > 12) { + explanation.add("Guessing month precedes day in timestamps as one sample had second number [" + + match.secondIndeterminateDateNumber + "]"); + return Boolean.FALSE; + } + secondIndeterminateNumbers.set(match.secondIndeterminateDateNumber); + } + } + } + + // If there are many more values of one number than the other then assume that's the day + final int ratioForResult = 3; + int firstCardinality = firstIndeterminateNumbers.cardinality(); + int secondCardinality = secondIndeterminateNumbers.cardinality(); + if (secondCardinality == 0) { + // This happens in the following cases: + // - No indeterminate numbers (in which case the answer is irrelevant) + // - Only one indeterminate number (in which case we favour month over day) + return Boolean.FALSE; + } + // firstCardinality can be 0, but then secondCardinality should have been 0 too + assert firstCardinality > 0; + if (firstCardinality >= ratioForResult * secondCardinality) { + explanation.add("Guessing day precedes month in timestamps as there were [" + + firstCardinality + "] distinct values of the first number but only [" + secondCardinality + "] for the second"); + return Boolean.TRUE; + } + if (secondCardinality >= ratioForResult * firstCardinality) { + explanation.add("Guessing month precedes day in timestamps as there " + (firstCardinality == 1 ? "was" : "were") + " only [" + + firstCardinality + "] distinct " + (firstCardinality == 1 ? "value" : "values") + + " of the first number but [" + secondCardinality + "] for the second"); + return Boolean.FALSE; + } + return null; } /** - * If a required timestamp format contains a fractional seconds component, adjust it to the - * fractional seconds format that's in the candidate timestamp formats, i.e. ",SSS". So, for - * example, "YYYY-MM-dd HH:mm:ss.SSSSSSSSS Z" would get adjusted to "YYYY-MM-dd HH:mm:ss,SSS Z". + * If timestamp formats where the order of day and month could vary (as in a choice between dd/MM/yyyy + * or MM/dd/yyyy for example), make a guess about whether the day comes first based on the default order + * for a given locale. + * @return true if the day comes first and false if the month comes first. */ - static String adjustRequiredFormat(String requiredFormat) { + boolean guessIsDayFirstFromLocale(Locale locale) { - return (requiredFormat == null) ? null : - FRACTIONAL_SECOND_TIMESTAMP_FORMAT_PATTERN.matcher(requiredFormat).replaceFirst(DEFAULT_FRACTIONAL_SECOND_FORMAT); + // Fall back to whether the day comes before the month in the default short date format for the server locale. + // Can't use 1 as that occurs in 1970, so 3rd Feb is the earliest date that will reveal the server default. 
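A worked example of the cardinality fallback (hypothetical samples):

    // Samples 01/06 through 12/06: every first number is <= 12, so the early
    // return never fires; the first number takes 12 distinct values and the
    // second only 1. Since 12 >= 3 * 1, day is guessed to precede month.
    // A single sample such as 25/06 would have decided immediately, because
    // a first number greater than 12 cannot be a month.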
+ String feb3rd1970 = makeShortLocalizedDateTimeFormatterForLocale(locale).format(LocalDate.ofEpochDay(33)); + if (feb3rd1970.indexOf('3') < feb3rd1970.indexOf('2')) { + explanation.add("Guessing day precedes month in timestamps based on server locale [" + + locale.getDisplayName(Locale.ROOT) + "]"); + return true; + } else { + explanation.add("Guessing month precedes day in timestamps based on server locale [" + + locale.getDisplayName(Locale.ROOT) + "]"); + return false; + } } - private static TimestampMatch makeTimestampMatch(CandidateTimestampFormat chosenTimestampFormat, int chosenIndex, - String preface, String matchedDate, String epilogue) { - Tuple fractionalSecondsInterpretation = interpretFractionalSeconds(matchedDate); - List jodaTimestampFormats = chosenTimestampFormat.jodaTimestampFormats; - List javaTimestampFormats = chosenTimestampFormat.javaTimestampFormats; - Pattern simplePattern = chosenTimestampFormat.simplePattern; - char separator = fractionalSecondsInterpretation.v1(); - if (separator != DEFAULT_FRACTIONAL_SECOND_SEPARATOR) { - jodaTimestampFormats = jodaTimestampFormats.stream() - .map(jodaTimestampFormat -> jodaTimestampFormat.replace(DEFAULT_FRACTIONAL_SECOND_SEPARATOR, separator)) - .collect(Collectors.toList()); - javaTimestampFormats = javaTimestampFormats.stream() - .map(javaTimestampFormat -> javaTimestampFormat.replace(DEFAULT_FRACTIONAL_SECOND_SEPARATOR, separator)) - .collect(Collectors.toList()); - if (jodaTimestampFormats.stream().noneMatch(jodaTimestampFormat -> jodaTimestampFormat.startsWith("UNIX"))) { - String patternStr = simplePattern.pattern(); - int separatorPos = patternStr.lastIndexOf(DEFAULT_FRACTIONAL_SECOND_SEPARATOR); - if (separatorPos >= 0) { - StringBuilder newPatternStr = new StringBuilder(patternStr); - newPatternStr.replace(separatorPos, separatorPos + 1, ((separator == '.') ? "\\" : "") + separator); - simplePattern = Pattern.compile(newPatternStr.toString()); + @SuppressForbidden(reason = "DateTimeFormatter.ofLocalizedDate() is forbidden because it uses the default locale, " + + "but here we are explicitly setting the locale on the formatter in a subsequent call") + private static DateTimeFormatter makeShortLocalizedDateTimeFormatterForLocale(Locale locale) { + return DateTimeFormatter.ofLocalizedDate(FormatStyle.SHORT).withLocale(locale).withZone(ZoneOffset.UTC); + } + + /** + * Given a raw timestamp format that might contain indeterminate day/month parts, + * return the corresponding pattern with the placeholders replaced with concrete + * day/month formats. + */ + static String determiniseJavaTimestampFormat(String rawJavaTimestampFormat, boolean isDayFirst) { + + Matcher matcher = INDETERMINATE_FORMAT_INTERPRETER.matcher(rawJavaTimestampFormat); + if (matcher.matches()) { + StringBuilder builder = new StringBuilder(); + for (int groupNum = 1; groupNum <= matcher.groupCount(); ++groupNum) { + switch (groupNum) { + case 2: { + char formatChar = isDayFirst ? 'd' : 'M'; + for (int count = matcher.group(groupNum).length(); count > 0; --count) { + builder.append(formatChar); + } + break; + } + case 4: { + char formatChar = isDayFirst ? 
'M' : 'd';
+ for (int count = matcher.group(groupNum).length(); count > 0; --count) {
+ builder.append(formatChar);
+ }
+ break;
+ }
+ default:
+ builder.append(matcher.group(groupNum));
+ break;
}
}
+ return builder.toString();
+ } else {
+ return rawJavaTimestampFormat;
}
- int numberOfDigitsInFractionalComponent = fractionalSecondsInterpretation.v2();
- if (numberOfDigitsInFractionalComponent > 3) {
- String fractionalSecondsFormat = "SSSSSSSSS".substring(0, numberOfDigitsInFractionalComponent);
- jodaTimestampFormats = jodaTimestampFormats.stream()
- .map(jodaTimestampFormat -> jodaTimestampFormat.replace("SSS", fractionalSecondsFormat))
- .collect(Collectors.toList());
- javaTimestampFormats = javaTimestampFormats.stream()
- .map(javaTimestampFormat -> javaTimestampFormat.replace("SSS", fractionalSecondsFormat))
- .collect(Collectors.toList());
+ }
+
+ /**
+ * These are still used by Logstash.
+ * @return A list of Joda timestamp formats that correspond to the detected Java timestamp formats.
+ */
+ public List getJodaTimestampFormats() {
+ List javaTimestampFormats = getJavaTimestampFormats();
+ return (javaTimestampFormats == null) ? null : javaTimestampFormats.stream()
+ .map(format -> format.replace("yy", "YY").replace("XXX", "ZZ").replace("XX", "Z")).collect(Collectors.toList());
+ }
+
+ /**
+ * Does parsing the timestamp produce different results depending on the timezone of the parser?
+ * I.e., does the textual representation NOT define the timezone?
+ */
+ public boolean hasTimezoneDependentParsing() {
+ if (matchedFormats.isEmpty()) {
+ // If errorOnNoTimestamp is set and we get here it means no samples have been added, which is likely a programmer mistake
+ assert errorOnNoTimestamp == false;
+ return false;
}
- return new TimestampMatch(chosenIndex, preface, jodaTimestampFormats, javaTimestampFormats, simplePattern,
- chosenTimestampFormat.standardGrokPatternName, epilogue);
+ return matches.stream().filter(match -> matchedFormats.size() < 2 || matchedFormats.get(0).canMergeWith(match.timestampFormat))
+ .anyMatch(match -> match.hasTimezoneDependentParsing);
}
/**
- * Interpret the fractional seconds component of a date to determine two things:
- * 1. The separator character - one of colon, comma and dot.
- * 2. The number of digits in the fractional component.
- * @param date The textual representation of the date for which fractional seconds are to be interpreted.
- * @return A tuple of (fractional second separator character, number of digits in fractional component).
+ * Sometimes Elasticsearch mappings for dates need to include the format.
+ * This method returns appropriate mappings settings: at minimum "type" : "date",
+ * and possibly also a "format" setting.
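Again writing ? for the placeholder, determiniseJavaTimestampFormat resolves an indeterminate format both ways (assuming the interpreter regex captures the two placeholder runs as groups 2 and 4, as the switch above implies):

    // raw format "??/??/yyyy HH:mm:ss":
    //   determiniseJavaTimestampFormat(raw, true)  -> "dd/MM/yyyy HH:mm:ss"
    //   determiniseJavaTimestampFormat(raw, false) -> "MM/dd/yyyy HH:mm:ss"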
*/ - static Tuple interpretFractionalSeconds(String date) { + public Map getEsDateMappingTypeWithFormat() { + List javaTimestampFormats = getJavaTimestampFormats(); + if (javaTimestampFormats.contains("TAI64N")) { + // There's no format for TAI64N in the timestamp formats used in mappings + return Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + } + Map mapping = new LinkedHashMap<>(); + mapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + String formats = javaTimestampFormats.stream().map(format -> { + switch (format) { + case "ISO8601": + return "iso8601"; + case "UNIX_MS": + return "epoch_millis"; + case "UNIX": + return "epoch_second"; + default: + return format; + } + }).collect(Collectors.joining("||")); + if (formats.isEmpty() == false) { + mapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, formats); + } + return mapping; + } - Matcher matcher = FRACTIONAL_SECOND_INTERPRETER.matcher(date); - if (matcher.find()) { - return new Tuple<>(matcher.group(1).charAt(0), matcher.group(2).length()); + /** + * Given a timestamp candidate and a bit set showing the positions of digits in a piece of text, find the range + * of indices over which the candidate might possibly match. Searching for Grok patterns that nearly + * match but don't quite is very expensive, so this method allows only a substring of a long string to be + * searched using the full Grok pattern. + * @param candidate The timestamp candidate to consider. + * @param numberPosBitSet If not null, each bit must be set to true if and only if the + * corresponding position in the original text is a digit. + * @return A tuple of the form (start index, end index). If the timestamp candidate cannot possibly match + * anywhere then (-1, -1) is returned. The end index in the returned tuple may be beyond the end of the + * string (because the bit set is not necessarily the same length as the string so it cannot be capped + * by this method), so the caller must cap it before passing to {@link String#substring(int, int)}. + */ + static Tuple findBoundsForCandidate(CandidateTimestampFormat candidate, BitSet numberPosBitSet) { + + if (numberPosBitSet == null || candidate.quickRuleOutBitSets.isEmpty()) { + return new Tuple<>(0, Integer.MAX_VALUE); } - return new Tuple<>(DEFAULT_FRACTIONAL_SECOND_SEPARATOR, 0); + int minFirstMatchStart = -1; + int maxLastMatchEnd = -1; + for (BitSet quickRuleOutBitSet : candidate.quickRuleOutBitSets) { + int currentMatch = findBitPattern(numberPosBitSet, 0, quickRuleOutBitSet); + if (currentMatch >= 0) { + if (minFirstMatchStart == -1 || currentMatch < minFirstMatchStart) { + minFirstMatchStart = currentMatch; + } + do { + int currentMatchEnd = currentMatch + quickRuleOutBitSet.length(); + if (currentMatchEnd > maxLastMatchEnd) { + maxLastMatchEnd = currentMatchEnd; + } + currentMatch = findBitPattern(numberPosBitSet, currentMatch + 1, quickRuleOutBitSet); + } while (currentMatch > 0); + } + } + if (minFirstMatchStart == -1) { + assert maxLastMatchEnd == -1; + return new Tuple<>(-1, -1); + } + int lowerBound = Math.max(0, minFirstMatchStart - candidate.maxCharsBeforeQuickRuleOutMatch); + int upperBound = (Integer.MAX_VALUE - candidate.maxCharsAfterQuickRuleOutMatch - maxLastMatchEnd < 0) ? + Integer.MAX_VALUE : (maxLastMatchEnd + candidate.maxCharsAfterQuickRuleOutMatch); + return new Tuple<>(lowerBound, upperBound); } /** - * Represents a timestamp that has matched a field value or been found within a message. 
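A sketch of the maps getEsDateMappingTypeWithFormat returns, based on the switch above (assuming FileStructureUtils.MAPPING_TYPE_SETTING and MAPPING_FORMAT_SETTING are the literal "type" and "format" keys):

    // ISO8601:       { "type" : "date", "format" : "iso8601" }
    // UNIX_MS:       { "type" : "date", "format" : "epoch_millis" }
    // custom format: { "type" : "date", "format" : "dd/MM/yyyy HH:mm:ss" }
    // TAI64N:        { "type" : "keyword" }, as no mapping format exists for it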
+ * This is basically the "Shift-Add" algorithm for string matching from the paper "A New Approach to Text Searching". + * In this case the "alphabet" has just two "characters": 0 and 1 (or false and true in + * some places because of the {@link BitSet} interface). + * @see A New Approach to Text Searching + * @param findIn The binary string to search in; "text" in the terminology of the paper. + * @param beginIndex The index to start searching {@code findIn}. + * @param toFind The binary string to find; "pattern" in the terminology of the paper. + * @return The index (starting from 0) of the first match of {@code toFind} in {@code findIn}, or -1 if no match is found. */ - public static final class TimestampMatch { + static int findBitPattern(BitSet findIn, int beginIndex, BitSet toFind) { - /** - * The index of the corresponding entry in the ORDERED_CANDIDATE_FORMATS list. - */ - public final int candidateIndex; + assert beginIndex >= 0; - /** - * Text that came before the timestamp in the matched field/message. - */ - public final String preface; + // Note that this only compares up to the highest bit that is set, so trailing non digit characters will not participate + // in the comparison. This is not currently a problem for this class, but is something to consider if this functionality + // is ever reused elsewhere. The solution would be to use a wrapper class containing a BitSet and a separate int to store + // the length to compare. + int toFindLength = toFind.length(); + int findInLength = findIn.length(); + if (toFindLength == 0) { + return beginIndex; + } + // 63 here is the largest bit position (starting from 0) in a long + if (toFindLength > Math.min(63, findInLength)) { + // Since we control the input we should avoid the situation + // where the pattern to find has more bits than a single long + assert toFindLength <= 63 : "Length to find was [" + toFindLength + "] - cannot be greater than 63"; + return -1; + } + // ~1L means all bits set except the least significant + long state = ~1L; + // This array has one entry per "character" in the "alphabet" (which for this method consists of just 0 and 1) + // ~0L means all bits set + long[] toFindMask = { ~0L, ~0L }; + for (int i = 0; i < toFindLength; ++i) { + toFindMask[toFind.get(i) ? 1 : 0] &= ~(1L << i); + } + for (int i = beginIndex; i < findInLength; ++i) { + state |= toFindMask[findIn.get(i) ? 1 : 0]; + state <<= 1; + if ((state & (1L << toFindLength)) == 0L) { + return i - toFindLength + 1; + } + } - /** - * Time format specifier(s) that will work with Logstash and Ingest pipeline date parsers. - */ - public final List jodaTimestampFormats; + return -1; + } + + /** + * Converts a string into a {@link BitSet} with one bit per character of the string and bits + * set to 1 if the corresponding character in the string is a digit and 0 if not. (The first + * character of the string corresponds to the least significant bit in the {@link BitSet}, so + * if the {@link BitSet} is printed in natural order it will be reversed compared to the input, + * and then the most significant bit will be printed first. However, in terms of random access + * to individual characters/bits, this "reversal" is by far the most intuitive representation.) + * @param str The string to be mapped. + * @return A {@link BitSet} suitable for use as input to {@link #findBitPattern}. 
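The matcher operates purely on digit positions, so two strings with the same digit layout match each other; a small demonstration:

    BitSet text = TimestampFormatFinder.stringToNumberPosBitSet("ab12cd34"); // bits 2,3,6,7
    BitSet pattern = TimestampFormatFinder.stringToNumberPosBitSet("xy99");  // bits 2,3
    int pos = TimestampFormatFinder.findBitPattern(text, 0, pattern);
    // pos == 0: "ab12" has the layout no-digit, no-digit, digit, digit,
    // exactly the pattern of "xy99"; the next occurrence is at
    // findBitPattern(text, 1, pattern) == 4, i.e. "cd34".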
+ */
+ static BitSet stringToNumberPosBitSet(String str) {
+
+ BitSet result = new BitSet();
+ for (int index = 0; index < str.length(); ++index) {
+ if (Character.isDigit(str.charAt(index))) {
+ result.set(index);
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Represents an overall format matched within the supplied samples.
+ * Similar {@link TimestampFormat}s can be merged when they can be
+ * recognised by the same Grok pattern, simple regular expression, and
+ * punctuation in the preface, but have different Java timestamp formats.
+ *
+ * Objects are immutable. Merges that result in changes return new
+ * objects.
+ */
+ static final class TimestampFormat {
/**
- * The index of the corresponding entry in the ORDERED_CANDIDATE_FORMATS list.
+ * Java time formats that may contain indeterminate day/month patterns.
*/
- public final int candidateIndex;
+ final List rawJavaTimestampFormats;
/**
- * Text that came before the timestamp in the matched field/message.
- */
- public final String preface;
/**
* A simple regex that will work in many languages to detect whether the timestamp format
* exists in a particular line.
*/
- public final Pattern simplePattern;
+ final Pattern simplePattern;
/**
- * Name of an out-of-the-box Grok pattern that will match the timestamp.
+ * Name of a Grok pattern that will match the timestamp.
*/
- public final String grokPatternName;
+ final String grokPatternName;
/**
- * Text that came after the timestamp in the matched field/message.
+ * If {@link #grokPatternName} is not an out-of-the-box Grok pattern, then its definition.
+ */
+ final Map customGrokPatternDefinitions;
+
+ /**
+ * The punctuation characters in the text preceding the timestamp in the samples.
*/
- public final String epilogue;
+ final String prefacePunctuation;
+
+ TimestampFormat(List rawJavaTimestampFormats, Pattern simplePattern, String grokPatternName,
+ Map customGrokPatternDefinitions, String prefacePunctuation) {
+ this.rawJavaTimestampFormats = Collections.unmodifiableList(rawJavaTimestampFormats);
+ this.simplePattern = Objects.requireNonNull(simplePattern);
+ this.grokPatternName = Objects.requireNonNull(grokPatternName);
+ this.customGrokPatternDefinitions = Objects.requireNonNull(customGrokPatternDefinitions);
+ this.prefacePunctuation = prefacePunctuation;
+ }
+
+ boolean canMergeWith(TimestampFormat other) {
+
+ if (this == other) {
+ return true;
+ }
- TimestampMatch(int candidateIndex, String preface, String jodaTimestampFormat, String javaTimestampFormat, String simpleRegex,
- String grokPatternName, String epilogue) {
- this(candidateIndex, preface, Collections.singletonList(jodaTimestampFormat), Collections.singletonList(javaTimestampFormat),
- simpleRegex, grokPatternName, epilogue);
+ return other != null &&
+ this.simplePattern.pattern().equals(other.simplePattern.pattern()) &&
+ this.grokPatternName.equals(other.grokPatternName) &&
+ Objects.equals(this.customGrokPatternDefinitions, other.customGrokPatternDefinitions) &&
+ this.prefacePunctuation.equals(other.prefacePunctuation);
}
- TimestampMatch(int candidateIndex, String preface, List jodaTimestampFormats, List javaTimestampFormats,
- String simpleRegex, String grokPatternName, String epilogue) {
- this(candidateIndex, preface, jodaTimestampFormats, javaTimestampFormats, Pattern.compile(simpleRegex), grokPatternName,
- epilogue);
+ TimestampFormat mergeWith(TimestampFormat other) {
+
+ if (canMergeWith(other)) {
+ if (rawJavaTimestampFormats.equals(other.rawJavaTimestampFormats) == false) {
+ // Do the merge like this to preserve ordering
+ Set mergedJavaTimestampFormats = new
LinkedHashSet<>(rawJavaTimestampFormats); + if (mergedJavaTimestampFormats.addAll(other.rawJavaTimestampFormats)) { + return new TimestampFormat(new ArrayList<>(mergedJavaTimestampFormats), simplePattern, grokPatternName, + customGrokPatternDefinitions, prefacePunctuation); + } + } + // The merged format is exactly the same as this format, so there's no need to create a new object + return this; + } + + throw new IllegalArgumentException("Cannot merge timestamp format [" + this + "] with [" + other + "]"); + } + + @Override + public int hashCode() { + return Objects.hash(rawJavaTimestampFormats, simplePattern.pattern(), grokPatternName, customGrokPatternDefinitions, + prefacePunctuation); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + + TimestampFormat that = (TimestampFormat) other; + return Objects.equals(this.rawJavaTimestampFormats, that.rawJavaTimestampFormats) && + Objects.equals(this.simplePattern.pattern(), that.simplePattern.pattern()) && + Objects.equals(this.grokPatternName, that.grokPatternName) && + Objects.equals(this.customGrokPatternDefinitions, that.customGrokPatternDefinitions) && + Objects.equals(this.prefacePunctuation, that.prefacePunctuation); } - TimestampMatch(int candidateIndex, String preface, List jodaTimestampFormats, List javaTimestampFormats, - Pattern simplePattern, String grokPatternName, - String epilogue) { - this.candidateIndex = candidateIndex; - this.preface = preface; - this.jodaTimestampFormats = Collections.unmodifiableList(jodaTimestampFormats); - this.javaTimestampFormats = Collections.unmodifiableList(javaTimestampFormats); - this.simplePattern = simplePattern; - this.grokPatternName = grokPatternName; - this.epilogue = epilogue; + @Override + public String toString() { + return "Java timestamp formats = " + rawJavaTimestampFormats.stream().collect(Collectors.joining("', '", "[ '", "' ]")) + + ", simple pattern = '" + simplePattern.pattern() + "', grok pattern = '" + grokPatternName + "'" + + (customGrokPatternDefinitions.isEmpty() ? "" : ", custom grok pattern definitions = " + customGrokPatternDefinitions) + + ", preface punctuation = '" + prefacePunctuation + "'"; } + } + + /** + * Represents one match of a timestamp in one added sample. + */ + static final class TimestampMatch { + + // This picks out punctuation that is likely to represent a field separator. It deliberately + // leaves out punctuation that's most likely to vary between field values, such as dots. + private static final Pattern NON_PUNCTUATION_PATTERN = Pattern.compile("[^\\\\/|~:;,<>()\\[\\]{}«»\t]+"); + + // Used for deciding whether an ISO8601 timestamp contains a timezone. + private static final Pattern ISO8601_TIMEZONE_PATTERN = Pattern.compile("(Z|[+-]\\d{2}:?\\d{2})$"); /** - * Does the parsing the timestamp produce different results depending on the timezone of the parser? - * I.e., does the textual representation NOT define the timezone? + * Text that came before the timestamp in the matched field/message. */ - public boolean hasTimezoneDependentParsing() { - return javaTimestampFormats.stream().anyMatch(javaTimestampFormat -> - javaTimestampFormat.indexOf('X') == -1 && javaTimestampFormat.indexOf('z') == -1 && javaTimestampFormat.contains("mm")); - } + final String preface; /** - * Sometimes Elasticsearch mappings for dates need to include the format. 
- * This method returns appropriate mappings settings: at minimum "type"="date", - * and possibly also a "format" setting. + * Time format specifier(s) that will work with Logstash and Ingest pipeline date parsers. + */ + final TimestampFormat timestampFormat; + + /** + * These store the first and second numbers when the ordering of day and month is unclear, + * for example in 05/05/2019. Where the ordering is obvious they are set to -1. */ - public Map getEsDateMappingTypeWithFormat() { - if (javaTimestampFormats.contains("TAI64N")) { - // There's no format for TAI64N in the timestamp formats used in mappings - return Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); + final int firstIndeterminateDateNumber; + final int secondIndeterminateDateNumber; + + final boolean hasTimezoneDependentParsing; + + /** + * Text that came after the timestamp in the matched field/message. + */ + final String epilogue; + + TimestampMatch(CandidateTimestampFormat chosenTimestampFormat, String preface, String matchedDate, String epilogue) { + this.preface = Objects.requireNonNull(preface); + this.timestampFormat = new TimestampFormat(chosenTimestampFormat.javaTimestampFormatSupplier.apply(matchedDate), + chosenTimestampFormat.simplePattern, chosenTimestampFormat.outputGrokPatternName, + chosenTimestampFormat.customGrokPatternDefinitions(), + preface.isEmpty() ? preface : NON_PUNCTUATION_PATTERN.matcher(preface).replaceAll("")); + int[] indeterminateDateNumbers = parseIndeterminateDateNumbers(matchedDate, timestampFormat.rawJavaTimestampFormats); + this.firstIndeterminateDateNumber = indeterminateDateNumbers[0]; + this.secondIndeterminateDateNumber = indeterminateDateNumbers[1]; + this.hasTimezoneDependentParsing = requiresTimezoneDependentParsing(timestampFormat.rawJavaTimestampFormats.get(0), + matchedDate); + this.epilogue = Objects.requireNonNull(epilogue); + } + + TimestampMatch(TimestampMatch toCopyExceptFormat, TimestampFormat timestampFormat) { + this.preface = toCopyExceptFormat.preface; + this.timestampFormat = Objects.requireNonNull(timestampFormat); + this.firstIndeterminateDateNumber = toCopyExceptFormat.firstIndeterminateDateNumber; + this.secondIndeterminateDateNumber = toCopyExceptFormat.secondIndeterminateDateNumber; + this.hasTimezoneDependentParsing = toCopyExceptFormat.hasTimezoneDependentParsing; + this.epilogue = toCopyExceptFormat.epilogue; + } + + static boolean requiresTimezoneDependentParsing(String format, String matchedDate) { + switch (format) { + case "ISO8601": + assert matchedDate.length() > 6; + return ISO8601_TIMEZONE_PATTERN.matcher(matchedDate).find(matchedDate.length() - 6) == false; + case "UNIX_MS": + case "UNIX": + case "TAI64N": + return false; + default: + boolean notQuoted = true; + for (int pos = 0; pos < format.length(); ++pos) { + char curChar = format.charAt(pos); + if (curChar == '\'') { + notQuoted = !notQuoted; + } else if (notQuoted && (curChar == 'X' || curChar == 'z')) { + return false; + } + } + return true; } - Map mapping = new LinkedHashMap<>(); - mapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - String formats = javaTimestampFormats.stream().flatMap(format -> { - switch (format) { - case "ISO8601": - return Stream.empty(); - case "UNIX_MS": - return Stream.of("epoch_millis"); - case "UNIX": - return Stream.of("epoch_second"); - default: - return Stream.of(format); + } + + static int[] parseIndeterminateDateNumbers(String matchedDate, List rawJavaTimestampFormats) { + int[] indeterminateDateNumbers = { -1, -1 }; 
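Tracing requiresTimezoneDependentParsing (defined above) with a few inputs, illustratively:

    // ("ISO8601", "2019-05-16T16:56:14+01:00") -> false: the trailing offset is found
    // ("ISO8601", "2019-05-16T16:56:14")       -> true: no offset or Z at the end
    // ("UNIX", ...), ("UNIX_MS", ...), ("TAI64N", ...) -> always false
    // ("yyyy-MM-dd HH:mm:ss", ...)             -> true: no unquoted X or z
    // ("yyyy-MM-dd HH:mm:ssXX", ...)           -> false
    // ("HH:mm 'X marks the spot'", ...)        -> true: that X is quoted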
+ + for (String rawJavaTimestampFormat : rawJavaTimestampFormats) { + + if (rawJavaTimestampFormat.indexOf(INDETERMINATE_FIELD_PLACEHOLDER) >= 0) { + + try { + // Parse leniently under the assumption the first sequence of hashes is day and the + // second is month - this may not be true but all we do is extract the numbers + String javaTimestampFormat = determiniseJavaTimestampFormat(rawJavaTimestampFormat, true); + + // TODO consider support for overriding the locale too + // But it's not clear-cut as Grok only knows English and German date + // words and for indeterminate formats we're expecting numbers anyway + DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern(javaTimestampFormat, Locale.ROOT) + .withResolverStyle(ResolverStyle.LENIENT); + TemporalAccessor accessor = javaTimeFormatter.parse(matchedDate); + indeterminateDateNumbers[0] = accessor.get(ChronoField.DAY_OF_MONTH); + + // Now parse again leniently under the assumption the first sequence of hashes is month and the + // second is day - we have to do it twice and extract day as the lenient parser will wrap months > 12 + javaTimestampFormat = determiniseJavaTimestampFormat(rawJavaTimestampFormat, false); + + // TODO consider support for overriding the locale too + // But it's not clear-cut as Grok only knows English and German date + // words and for indeterminate formats we're expecting numbers anyway + javaTimeFormatter = DateTimeFormatter.ofPattern(javaTimestampFormat, Locale.ROOT) + .withResolverStyle(ResolverStyle.LENIENT); + accessor = javaTimeFormatter.parse(matchedDate); + indeterminateDateNumbers[1] = accessor.get(ChronoField.DAY_OF_MONTH); + if (indeterminateDateNumbers[0] > 0 && indeterminateDateNumbers[1] > 0) { + break; + } + } catch (DateTimeException e) { + // Move on to the next format + } } - }).collect(Collectors.joining("||")); - if (formats.isEmpty() == false) { - mapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, formats); } - return mapping; + + return indeterminateDateNumbers; } @Override public int hashCode() { - return Objects.hash(candidateIndex, preface, jodaTimestampFormats, javaTimestampFormats, simplePattern.pattern(), - grokPatternName, epilogue); + return Objects.hash(preface, timestampFormat, firstIndeterminateDateNumber, secondIndeterminateDateNumber, + hasTimezoneDependentParsing, epilogue); } @Override @@ -497,66 +1330,186 @@ public boolean equals(Object other) { } TimestampMatch that = (TimestampMatch) other; - return this.candidateIndex == that.candidateIndex && - Objects.equals(this.preface, that.preface) && - Objects.equals(this.jodaTimestampFormats, that.jodaTimestampFormats) && - Objects.equals(this.javaTimestampFormats, that.javaTimestampFormats) && - Objects.equals(this.simplePattern.pattern(), that.simplePattern.pattern()) && - Objects.equals(this.grokPatternName, that.grokPatternName) && + return Objects.equals(this.preface, that.preface) && + Objects.equals(this.timestampFormat, that.timestampFormat) && + this.firstIndeterminateDateNumber == that.firstIndeterminateDateNumber && + this.secondIndeterminateDateNumber == that.secondIndeterminateDateNumber && + this.hasTimezoneDependentParsing == that.hasTimezoneDependentParsing && Objects.equals(this.epilogue, that.epilogue); } @Override public String toString() { - return "index = " + candidateIndex + (preface.isEmpty() ? 
"" : ", preface = '" + preface + "'") + - ", Joda timestamp formats = " + jodaTimestampFormats.stream().collect(Collectors.joining("', '", "[ '", "' ]")) + - ", Java timestamp formats = " + javaTimestampFormats.stream().collect(Collectors.joining("', '", "[ '", "' ]")) + - ", simple pattern = '" + simplePattern.pattern() + "', grok pattern = '" + grokPatternName + "'" + - (epilogue.isEmpty() ? "" : ", epilogue = '" + epilogue + "'"); + return (preface.isEmpty() ? "" : "preface = '" + preface + "', ") + timestampFormat + + ((firstIndeterminateDateNumber > 0 || secondIndeterminateDateNumber > 0) + ? ", indeterminate date numbers = (" + firstIndeterminateDateNumber + "," + secondIndeterminateDateNumber + ")" + : "") + + ", has timezone-dependent parsing = " + hasTimezoneDependentParsing + + (epilogue.isEmpty() ? "" : ", epilogue = '" + epilogue + "'"); } } + /** + * Stores the details of a possible timestamp format to consider when looking for timestamps. + */ static final class CandidateTimestampFormat { - final List jodaTimestampFormats; - final List javaTimestampFormats; + private static final Pattern FRACTIONAL_SECOND_INTERPRETER = Pattern.compile("([" + FRACTIONAL_SECOND_SEPARATORS + "])(\\d{3,9})$"); + // This means that in the case of a literal Z, XXX is preferred + private static final Pattern TRAILING_OFFSET_WITHOUT_COLON_FINDER = Pattern.compile("[+-]\\d{4}$"); + + final Function> javaTimestampFormatSupplier; final Pattern simplePattern; + final String strictGrokPattern; final Grok strictSearchGrok; final Grok strictFullMatchGrok; - final String standardGrokPatternName; - final List quickRuleOutIndices; - - CandidateTimestampFormat(String jodaTimestampFormat, String javaTimestampFormat, String simpleRegex, String strictGrokPattern, - String standardGrokPatternName) { - this(Collections.singletonList(jodaTimestampFormat), Collections.singletonList(javaTimestampFormat), simpleRegex, - strictGrokPattern, standardGrokPatternName); - } + final String outputGrokPatternName; + final List quickRuleOutBitSets; + final int maxCharsBeforeQuickRuleOutMatch; + final int maxCharsAfterQuickRuleOutMatch; - CandidateTimestampFormat(String jodaTimestampFormat, String javaTimestampFormat, String simpleRegex, String strictGrokPattern, - String standardGrokPatternName, List quickRuleOutIndices) { - this(Collections.singletonList(jodaTimestampFormat), Collections.singletonList(javaTimestampFormat), simpleRegex, - strictGrokPattern, standardGrokPatternName, quickRuleOutIndices); + CandidateTimestampFormat(Function> javaTimestampFormatSupplier, String simpleRegex, String strictGrokPattern, + String outputGrokPatternName) { + this(javaTimestampFormatSupplier, simpleRegex, strictGrokPattern, outputGrokPatternName, Collections.emptyList(), + Integer.MAX_VALUE, Integer.MAX_VALUE); } - CandidateTimestampFormat(List jodaTimestampFormats, List javaTimestampFormats, String simpleRegex, - String strictGrokPattern, String standardGrokPatternName) { - this(jodaTimestampFormats, javaTimestampFormats, simpleRegex, strictGrokPattern, standardGrokPatternName, - Collections.emptyList()); + CandidateTimestampFormat(Function> javaTimestampFormatSupplier, String simpleRegex, String strictGrokPattern, + String outputGrokPatternName, String quickRuleOutPattern, int maxCharsBeforeQuickRuleOutMatch, + int maxCharsAfterQuickRuleOutMatch) { + this(javaTimestampFormatSupplier, simpleRegex, strictGrokPattern, outputGrokPatternName, + Collections.singletonList(quickRuleOutPattern), maxCharsBeforeQuickRuleOutMatch, 
maxCharsAfterQuickRuleOutMatch); } - CandidateTimestampFormat(List jodaTimestampFormats, List javaTimestampFormats, String simpleRegex, - String strictGrokPattern, String standardGrokPatternName, List quickRuleOutIndices) { - this.jodaTimestampFormats = jodaTimestampFormats; - this.javaTimestampFormats = javaTimestampFormats; + CandidateTimestampFormat(Function> javaTimestampFormatSupplier, String simpleRegex, String strictGrokPattern, + String outputGrokPatternName, List quickRuleOutPatterns, int maxCharsBeforeQuickRuleOutMatch, + int maxCharsAfterQuickRuleOutMatch) { + this.javaTimestampFormatSupplier = Objects.requireNonNull(javaTimestampFormatSupplier); this.simplePattern = Pattern.compile(simpleRegex, Pattern.MULTILINE); + this.strictGrokPattern = Objects.requireNonNull(strictGrokPattern); // The (?m) here has the Ruby meaning, which is equivalent to (?s) in Java this.strictSearchGrok = new Grok(Grok.getBuiltinPatterns(), "(?m)%{DATA:" + PREFACE + "}" + strictGrokPattern + "%{GREEDYDATA:" + EPILOGUE + "}", TimeoutChecker.watchdog); this.strictFullMatchGrok = new Grok(Grok.getBuiltinPatterns(), "^" + strictGrokPattern + "$", TimeoutChecker.watchdog); - this.standardGrokPatternName = standardGrokPatternName; - assert quickRuleOutIndices.stream() - .noneMatch(quickRuleOutIndex -> quickRuleOutIndex < 0 || quickRuleOutIndex >= QUICK_RULE_OUT_PATTERNS.size()); - this.quickRuleOutIndices = quickRuleOutIndices; + this.outputGrokPatternName = Objects.requireNonNull(outputGrokPatternName); + this.quickRuleOutBitSets = quickRuleOutPatterns.stream().map(TimestampFormatFinder::stringToNumberPosBitSet) + .collect(Collectors.toList()); + assert maxCharsBeforeQuickRuleOutMatch >= 0; + this.maxCharsBeforeQuickRuleOutMatch = maxCharsBeforeQuickRuleOutMatch; + assert maxCharsAfterQuickRuleOutMatch >= 0; + this.maxCharsAfterQuickRuleOutMatch = maxCharsAfterQuickRuleOutMatch; + } + + Map customGrokPatternDefinitions() { + return CUSTOM_TIMESTAMP_GROK_NAME.equals(outputGrokPatternName) + ? Collections.singletonMap(CUSTOM_TIMESTAMP_GROK_NAME, strictGrokPattern) + : Collections.emptyMap(); + } + + static List iso8601FormatFromExample(String example) { + + // The Elasticsearch ISO8601 parser requires a literal T between the date and time, so + // longhand formats are needed if there's a space instead + return (example.indexOf('T') >= 0) ? Collections.singletonList("ISO8601") : iso8601LikeFormatFromExample(example, " ", ""); + } + + static List iso8601LikeFormatFromExample(String example, String timeSeparator, String timezoneSeparator) { + + StringBuilder builder = new StringBuilder("yyyy-MM-dd"); + builder.append(timeSeparator).append("HH:mm"); + + // Seconds are optional in ISO8601 + if (example.length() > builder.length() && example.charAt(builder.length()) == ':') { + builder.append(":ss"); + } + + if (example.length() > builder.length()) { + + // Add fractional seconds pattern if appropriate + char nextChar = example.charAt(builder.length()); + if (FRACTIONAL_SECOND_SEPARATORS.indexOf(nextChar) >= 0) { + builder.append(nextChar); + for (int pos = builder.length(); pos < example.length(); ++pos) { + if (Character.isDigit(example.charAt(pos))) { + builder.append('S'); + } else { + break; + } + } + } + + // Add timezone if appropriate - in the case of a literal Z, XX is preferred + if (example.length() > builder.length()) { + builder.append(timezoneSeparator).append((example.indexOf(':', builder.length()) > 0) ? 
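// A standalone check (illustrative, not part of this patch) of the offset pattern choice
// made just above: "XX" matches a colon-less zone offset such as "+0100", "XXX" matches
// the colon form "+01:00", and both accept a literal "Z" - which is why the builder only
// needs to look for a colon in the example's trailing offset.
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

class OffsetPatternSketch {
    public static void main(String[] args) {
        DateTimeFormatter xx = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ssXX", Locale.ROOT);
        DateTimeFormatter xxx = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ssXXX", Locale.ROOT);
        System.out.println(OffsetDateTime.parse("2018-05-24 17:28:31+0100", xx));   // colon-less offset
        System.out.println(OffsetDateTime.parse("2018-05-24 17:28:31Z", xx));       // literal Z also parses
        System.out.println(OffsetDateTime.parse("2018-05-24 17:28:31+01:00", xxx)); // offset with colon
    }
}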
"XXX" : "XX"); + } + } else { + // This method should not have been called if the example didn't include the bare minimum of date and time + assert example.length() == builder.length() : "Expected [" + example + "] and [" + builder + "] to be the same length"; + } + + return Collections.singletonList(builder.toString()); + } + + static List adjustTrailingTimezoneFromExample(String example, String formatWithSecondsAndXX) { + return Collections.singletonList( + TRAILING_OFFSET_WITHOUT_COLON_FINDER.matcher(example).find() ? formatWithSecondsAndXX : formatWithSecondsAndXX + "X"); + } + + private static String adjustFractionalSecondsFromEndOfExample(String example, String formatNoFraction) { + + Matcher matcher = FRACTIONAL_SECOND_INTERPRETER.matcher(example); + return matcher.find() + ? (formatNoFraction + matcher.group(1).charAt(0) + "SSSSSSSSS".substring(0, matcher.group(2).length())) + : formatNoFraction; + } + + static List expandDayAndAdjustFractionalSecondsFromExample(String example, String formatWithddAndNoFraction) { + + String formatWithdd = adjustFractionalSecondsFromEndOfExample(example, formatWithddAndNoFraction); + return Arrays.asList(formatWithdd, formatWithdd.replace(" dd", " d")); + } + + static List indeterminateDayMonthFormatFromExample(String example) { + + StringBuilder builder = new StringBuilder(); + int examplePos = 0; + + // INDETERMINATE_FIELD_PLACEHOLDER here could represent either a day number (d) or month number (M) - it + // will get changed later based on evidence from many examples + for (Character patternChar + : Arrays.asList(INDETERMINATE_FIELD_PLACEHOLDER, INDETERMINATE_FIELD_PLACEHOLDER, 'y', 'H', 'm', 's')) { + + boolean foundDigit = false; + while (examplePos < example.length() && Character.isDigit(example.charAt(examplePos))) { + foundDigit = true; + builder.append(patternChar); + ++examplePos; + } + + if (patternChar == 's' || examplePos >= example.length() || foundDigit == false) { + break; + } + + builder.append(example.charAt(examplePos)); + ++examplePos; + } + + String format = builder.toString(); + // The Grok pattern should ensure we got at least as far as the year + assert format.contains("yy") : "Unexpected format [" + format + "] from example [" + example + "]"; + + if (examplePos < example.length()) { + // If we haven't consumed the whole example then we should have got as far as + // the (whole) seconds, and the bit afterwards should be the fractional seconds + assert builder.toString().endsWith("ss") : "Unexpected format [" + format + "] from example [" + example + "]"; + format = adjustFractionalSecondsFromEndOfExample(example, format); + } + + assert Character.isLetter(format.charAt(format.length() - 1)) + : "Unexpected format [" + format + "] from example [" + example + "]"; + assert format.length() == example.length() : "Unexpected format [" + format + "] from example [" + example + "]"; + + return Collections.singletonList(format); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java index 53550ebf18dd3..d2572b7fd2085 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import org.w3c.dom.Document; import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; @@ -79,6 +78,9 @@ static XmlFileStructureFinder makeXmlFileStructureFinder(List explanatio ++linesConsumed; } + // null to allow GC before timestamp search + sampleDocEnds = null; + // If we get here the XML parser should have confirmed this assert messagePrefix.charAt(0) == '<'; String topLevelTag = messagePrefix.substring(1); @@ -91,17 +93,17 @@ static XmlFileStructureFinder makeXmlFileStructureFinder(List explanatio .setNumMessagesAnalyzed(sampleRecords.size()) .setMultilineStartPattern("^\\s*<" + topLevelTag); - Tuple timeField = + Tuple timeField = FileStructureUtils.guessTimestampField(explanation, sampleRecords, overrides, timeoutChecker); if (timeField != null) { boolean needClientTimeZone = timeField.v2().hasTimezoneDependentParsing(); structureBuilder.setTimestampField(timeField.v1()) - .setJodaTimestampFormats(timeField.v2().jodaTimestampFormats) - .setJavaTimestampFormats(timeField.v2().javaTimestampFormats) + .setJodaTimestampFormats(timeField.v2().getJodaTimestampFormats()) + .setJavaTimestampFormats(timeField.v2().getJavaTimestampFormats()) .setNeedClientTimezone(needClientTimeZone) - .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, topLevelTag + "." + timeField.v1(), - timeField.v2().javaTimestampFormats, needClientTimeZone)); + .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), + topLevelTag + "." + timeField.v1(), timeField.v2().getJavaTimestampFormats(), needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index 10bdf0d16d8eb..280a50324e447 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -42,7 +42,7 @@ public void testCreateConfigsGivenCompleteCsv() throws Exception { assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); - assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -77,7 +77,7 @@ public void testCreateConfigsGivenCompleteCsvAndColumnNamesOverride() throws Exc assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); - assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), 
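// A rough sketch (not code from this patch) of the pipeline shape that
// makeIngestPipelineDefinition produces for the structured case above: a single date
// processor keyed on the timestamp field, carrying the detected formats and, when parsing
// is timezone-dependent, a client-supplied timezone. The "{{ beat.timezone }}" template
// value and the LinkedHashMap layout are assumptions for illustration.
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

class IngestPipelineSketch {
    public static void main(String[] args) {
        Map<String, Object> dateProcessor = new LinkedHashMap<>();
        dateProcessor.put("field", "timestamp");
        dateProcessor.put("timezone", "{{ beat.timezone }}"); // only when needClientTimezone is true
        dateProcessor.put("formats", Arrays.asList("ISO8601"));
        Map<String, Object> pipeline = new LinkedHashMap<>();
        pipeline.put("description", "Ingest pipeline created by file structure finder");
        pipeline.put("processors", Collections.singletonList(Collections.singletonMap("date", dateProcessor)));
        System.out.println(pipeline);
    }
}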
structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -147,7 +147,7 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertEquals("^\"?message\"?,\"?time\"?,\"?count\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -185,7 +185,7 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -230,7 +230,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsAndOverriddenTimeField() t "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -270,7 +270,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertEquals(Character.valueOf(','), structure.getDelimiter()); assertEquals(Character.valueOf('"'), structure.getQuote()); assertTrue(structure.getHasHeaderRow()); @@ -317,7 +317,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeaderAndColumnNames "\"?RatecodeID\"?,\"?store_and_fwd_flag\"?,\"?PULocationID\"?,\"?DOLocationID\"?,\"?payment_type\"?,\"?fare_amount\"?," + "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); - assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); + assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", 
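// A quick standalone check (not part of this patch) of the loosened multiline start
// pattern asserted above: "[T ]" accepts either ISO's literal 'T' or a space as the
// date/time separator, and matching stops at the minutes so second-less timestamps also
// anchor a new message.
import java.util.regex.Pattern;

class MultilineStartSketch {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("^\"?\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}");
        System.out.println(p.matcher("2018-05-24T17:28:31,735").find()); // true
        System.out.println(p.matcher("2018-05-24 17:28").find());        // true - no seconds required
        System.out.println(p.matcher("May 24 2018 17:28:31").find());    // false
    }
}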
structure.getMultilineStartPattern());
         assertEquals(Character.valueOf(','), structure.getDelimiter());
         assertEquals(Character.valueOf('"'), structure.getQuote());
         assertTrue(structure.getHasHeaderRow());
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java
index 264521e68fb51..c0adccd0eb477 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java
@@ -7,7 +7,6 @@
 
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats;
-import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -35,12 +34,12 @@ public void testMoreLikelyGivenKeyword() {
 
     public void testGuessTimestampGivenSingleSampleSingleField() {
         Map<String, Object> sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735");
-        Tuple<String, TimestampMatch> match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample),
+        Tuple<String, TimestampFormatFinder.TimestampMatch> match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample),
             EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER);
         assertNotNull(match);
         assertEquals("field1", match.v1());
-        assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS"));
-        assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName);
+        assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601"));
+        assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName());
     }
 
     public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeFieldOverride() {
 
         FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampField("field1").build();
 
         Map<String, Object> sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735");
-        Tuple<String, TimestampMatch> match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample),
+        Tuple<String, TimestampFormatFinder.TimestampMatch> match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample),
             overrides, NOOP_TIMEOUT_CHECKER);
         assertNotNull(match);
         assertEquals("field1", match.v1());
-        assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS"));
-        assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName);
+        assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601"));
+        assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName());
     }
 
     public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeFieldOverride() {
@@ -73,12 +72,12 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeForma
 
         FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("ISO8601").build();
 
         Map<String, Object> sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735");
-        Tuple<String, TimestampMatch> match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample),
+        Tuple<String, TimestampFormatFinder.TimestampMatch> match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample),
             overrides, NOOP_TIMEOUT_CHECKER);
         assertNotNull(match);
         assertEquals("field1", match.v1());
-        assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS"));
-        assertEquals("TIMESTAMP_ISO8601",
match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeFormatOverride() { @@ -97,18 +96,18 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeForma public void testGuessTimestampGivenSamplesWithSameSingleTimeField() { Map sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map sample2 = Collections.singletonMap("field1", "2018-05-24T17:33:39,406"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("ISO8601")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithOneSingleTimeFieldDifferentFormat() { Map sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); - Map sample2 = Collections.singletonMap("field1", "2018-05-24 17:33:39,406"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Map sample2 = Collections.singletonMap("field1", "Thu May 24 17:33:39 2018"); + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -116,7 +115,7 @@ public void testGuessTimestampGivenSamplesWithOneSingleTimeFieldDifferentFormat( public void testGuessTimestampGivenSamplesWithDifferentSingleTimeField() { Map sample1 = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); Map sample2 = Collections.singletonMap("another_field", "2018-05-24T17:33:39,406"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -126,12 +125,12 @@ public void testGuessTimestampGivenSingleSampleManyFieldsOneTimeFormat() { sample.put("foo", "not a time"); sample.put("time", "2018-05-24 17:28:31,735"); sample.put("bar", 42); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("yyyy-MM-dd HH:mm:ss,SSS")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormat() { @@ -143,12 +142,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormat() { sample2.put("foo", "whatever"); sample2.put("time", "2018-05-29 11:53:02,837"); sample2.put("bar", 17); - Tuple match = FileStructureUtils.guessTimestampField(explanation, 
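// Why the different-format test above expects null, as a standalone sketch (names and the
// use of Locale.ENGLISH are illustrative; the production code works differently): no
// single candidate pattern parses both sample values, so no consistent timestamp field
// can be reported.
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Locale;

class FormatConsistencySketch {
    static boolean parses(String value, String pattern) {
        try {
            DateTimeFormatter.ofPattern(pattern, Locale.ENGLISH).parse(value);
            return true;
        } catch (DateTimeParseException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        String iso = "2018-05-24T17:28:31,735";
        String cStyle = "Thu May 24 17:33:39 2018";
        System.out.println(parses(iso, "yyyy-MM-dd'T'HH:mm:ss,SSS"));    // true
        System.out.println(parses(cStyle, "yyyy-MM-dd'T'HH:mm:ss,SSS")); // false
        System.out.println(parses(cStyle, "EEE MMM dd HH:mm:ss yyyy")); // true
        System.out.println(parses(iso, "EEE MMM dd HH:mm:ss yyyy"));    // false
    }
}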
Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("yyyy-MM-dd HH:mm:ss,SSS")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsSameTimeFieldDifferentTimeFormat() { @@ -160,7 +159,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameTimeFieldDifferentTi sample2.put("foo", "whatever"); sample2.put("time", "May 29 2018 11:53:02"); sample2.put("bar", 17); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -174,12 +173,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDist sample2.put("red_herring", "whatever"); sample2.put("time", "2018-05-29 11:53:02,837"); sample2.put("bar", 17); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); - assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("yyyy-MM-dd HH:mm:ss,SSS")); + assertEquals("TIMESTAMP_ISO8601", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDistractionAfter() { @@ -191,12 +190,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDist sample2.put("foo", "whatever"); sample2.put("time", "May 29 2018 11:53:02"); sample2.put("red_herring", "17"); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); - assertEquals("CISCOTIMESTAMP", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); + assertEquals("CISCOTIMESTAMP", match.v2().getGrokPatternName()); } public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentTimeFields() { @@ -208,7 +207,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentTimeFields() sample2.put("foo", "whatever"); sample2.put("time2", "May 29 2018 11:53:02"); sample2.put("bar", 42); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNull(match); } @@ -224,12 +223,12 @@ public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentAndConsisten sample2.put("time2", "May 10 
2018 11:53:02"); sample2.put("time3", "Thu, May 10 2018 11:53:02"); sample2.put("bar", 42); - Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), + Tuple match = FileStructureUtils.guessTimestampField(explanation, Arrays.asList(sample1, sample2), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time2", match.v1()); - assertThat(match.v2().javaTimestampFormats, contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); - assertEquals("CISCOTIMESTAMP", match.v2().grokPatternName); + assertThat(match.v2().getJavaTimestampFormats(), contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); + assertEquals("CISCOTIMESTAMP", match.v2().getGrokPatternName()); } public void testGuessMappingGivenNothing() { @@ -273,7 +272,9 @@ public void testGuessMappingGivenLong() { } public void testGuessMappingGivenDate() { - Map expected = Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + Map expected = new HashMap<>(); + expected.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expected.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); assertEquals(expected, guessMapping(explanation, "foo", Arrays.asList("2018-06-11T13:26:47Z", "2018-06-11T13:27:12Z"))); } @@ -347,18 +348,19 @@ public void testGuessMappingsAndCalculateFieldStats() { public void testMakeIngestPipelineDefinitionGivenStructuredWithoutTimestamp() { - assertNull(FileStructureUtils.makeIngestPipelineDefinition(null, null, null, false)); + assertNull(FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), null, null, false)); } @SuppressWarnings("unchecked") public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { String timestampField = randomAlphaOfLength(10); - List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).javaTimestampFormats; + List timestampFormats = randomFrom(Collections.singletonList("ISO8601"), + Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); boolean needClientTimezone = randomBoolean(); - Map pipeline = - FileStructureUtils.makeIngestPipelineDefinition(null, timestampField, timestampFormats, needClientTimezone); + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(null, Collections.emptyMap(), timestampField, + timestampFormats, needClientTimezone); assertNotNull(pipeline); assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); @@ -382,11 +384,12 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { String grokPattern = randomAlphaOfLength(100); String timestampField = randomAlphaOfLength(10); - List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).javaTimestampFormats; + List timestampFormats = randomFrom(Collections.singletonList("ISO8601"), + Arrays.asList("EEE MMM dd HH:mm:ss yyyy", "EEE MMM d HH:mm:ss yyyy")); boolean needClientTimezone = randomBoolean(); - Map pipeline = - FileStructureUtils.makeIngestPipelineDefinition(grokPattern, timestampField, timestampFormats, needClientTimezone); + Map pipeline = FileStructureUtils.makeIngestPipelineDefinition(grokPattern, Collections.emptyMap(), timestampField, + timestampFormats, needClientTimezone); assertNotNull(pipeline); assertEquals("Ingest pipeline created by file structure finder", pipeline.remove("description")); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java index dc48662fb35f7..7e6363602dcdd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/GrokPatternCreatorTests.java @@ -43,7 +43,7 @@ public void testPopulatePrefacesAndEpiloguesGivenTimestamp() { Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); + candidate.processCaptures(explanation, fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); assertThat(prefaces, containsInAnyOrder("[", "[", "junk [", "[")); assertThat(epilogues, containsInAnyOrder("] DEBUG ", "] ERROR ", "] INFO ", "] DEBUG ")); @@ -60,7 +60,7 @@ public void testPopulatePrefacesAndEpiloguesGivenEmailAddress() { Collection prefaces = new ArrayList<>(); Collection epilogues = new ArrayList<>(); - candidate.processCaptures(fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); + candidate.processCaptures(explanation, fieldNameCountStore, matchingStrings, prefaces, epilogues, null, null, NOOP_TIMEOUT_CHECKER); assertThat(prefaces, containsInAnyOrder("before ", "abc ", "")); assertThat(epilogues, containsInAnyOrder(" after", " xyz", "")); @@ -73,7 +73,8 @@ public void testAppendBestGrokMatchForStringsGivenTimestampsAndLogLevels() { "junk [2018-01-22T07:33:23] INFO ", "[2018-01-21T03:33:23] DEBUG "); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?\\[%{TIMESTAMP_ISO8601:extra_timestamp}\\] %{LOGLEVEL:loglevel} ", @@ -87,7 +88,8 @@ public void testAppendBestGrokMatchForStringsGivenNumbersInBrackets() { " (4)", " (-5) "); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?\\(%{INT:field}\\).*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -99,7 +101,8 @@ public void testAppendBestGrokMatchForStringsGivenNegativeNumbersWithoutBreak() "prior to-3", "-4"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); // It seems sensible that we don't detect these suffices as either base 10 or base 16 numbers @@ -113,7 +116,8 @@ public void testAppendBestGrokMatchForStringsGivenHexNumbers() { " -123", "1f is hex"); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, 
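// The CUSTOM_TIMESTAMP Grok definition that the tests below pass in as a custom pattern
// definition, hand-expanded to plain java.util.regex as a standalone sanity check (the
// sub-patterns are simplified approximations of the real Grok definitions, so this is
// illustrative only).
import java.util.regex.Pattern;

class CustomGrokSketch {
    public static void main(String[] args) {
        String monthnum = "(?:0?[1-9]|1[0-2])";
        String monthday = "(?:0?[1-9]|[12][0-9]|3[01])";
        String year = "\\d{4}";
        String hour = "(?:0?[0-9]|1[0-9]|2[0-3])";
        String minute = "[0-5][0-9]";
        // CUSTOM_TIMESTAMP = %{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)
        Pattern custom = Pattern.compile(monthnum + "/" + monthday + "/" + year + " " + hour + ":" + minute + "(?:AM|PM)");
        System.out.println(custom.matcher("4/20/2016 2:06PM").matches()); // true
    }
}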
null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); grokPatternCreator.appendBestGrokMatchForStrings(false, snippets, false, 0); assertEquals(".*?%{BASE16NUM:field}.*?", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -124,7 +128,8 @@ public void testAppendBestGrokMatchForStringsGivenHostnamesWithNumbers() { Collection snippets = Arrays.asList(" mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals("%{SYSLOGTIMESTAMP:timestamp} .*? .*?\\[%{INT:field}\\]: %{LOGLEVEL:loglevel} \\(.*? .*? .*?\\) .*? " + @@ -216,7 +225,7 @@ public void testCreateGrokPatternFromExamplesGivenCatalinaLogs() { "Invalid chunk ignored."); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals("%{CATALINA_DATESTAMP:timestamp} .*? .*?\\n%{LOGLEVEL:loglevel}: .*", @@ -239,7 +248,7 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { "Info\tsshd\tsubsystem request for sftp"); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + @@ -247,7 +256,101 @@ public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogs() { grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); assertEquals(5, mappings.size()); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); - assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"), mappings.get("extra_timestamp")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + } + + public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndIndeterminateFormat() { + + // Two timestamps: one ISO8601, one indeterminate day/month + Collection sampleMessages = Arrays.asList( + "559550912540598297\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t2016-04-20T14:06:53\t20/04/2016 
21:06:53,123456\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t2016-04-20T14:06:53\t20/04/2016 21:06:53,123456\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); + + assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{DATESTAMP:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", + grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "dd/MM/yyyy HH:mm:ss,SSSSSS"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + } + + public void testCreateGrokPatternFromExamplesGivenMultiTimestampLogsAndCustomDefinition() { + + // Two timestamps: one custom, one built-in + Collection sampleMessages = Arrays.asList( + "559550912540598297\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), + NOOP_TIMEOUT_CHECKER); + + assertEquals("%{INT:field}\\t%{CUSTOM_TIMESTAMP:timestamp}\\t%{TIMESTAMP_ISO8601:extra_timestamp}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", + grokPatternCreator.createGrokPatternFromExamples("CUSTOM_TIMESTAMP", "timestamp")); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("extra_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), 
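// A standalone check (not part of this patch) that the mapping format expected above
// really parses the second timestamp column of these samples, six-digit comma-separated
// fraction included.
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

class ResolvedIndeterminateSketch {
    public static void main(String[] args) {
        DateTimeFormatter f = DateTimeFormatter.ofPattern("dd/MM/yyyy HH:mm:ss,SSSSSS", Locale.ROOT);
        System.out.println(LocalDateTime.parse("20/04/2016 21:06:53,123456", f)); // 2016-04-20T21:06:53.123456
    }
}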
mappings.get("ipaddress")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); + } + + public void testCreateGrokPatternFromExamplesGivenTimestampAndTimeWithoutDate() { + + // Two timestamps: one with date, one without + Collection sampleMessages = Arrays.asList( + "559550912540598297\t2016-04-20T14:06:53\t21:06:53.123456\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t2016-04-20T14:06:53\t21:06:53.123456\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t2016-04-20T14:06:53\t21:06:53.123456\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t2016-04-20T14:06:53\t21:06:53.123456\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); + + assertEquals("%{INT:field}\\t%{TIMESTAMP_ISO8601:timestamp}\\t%{TIME:time}\\t%{INT:field2}\\t.*?\\t" + + "%{IP:ipaddress}\\t.*?\\t%{LOGLEVEL:loglevel}\\t.*", + grokPatternCreator.createGrokPatternFromExamples("TIMESTAMP_ISO8601", "timestamp")); + assertEquals(5, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("time")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("field2")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("ipaddress")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("loglevel")); @@ -273,7 +376,7 @@ public void testFindFullLineGrokPatternGivenApacheCombinedLogs() { "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\""); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); assertEquals(new Tuple<>("timestamp", "%{COMBINEDAPACHELOG}"), @@ -304,7 +407,8 @@ public void testAdjustForPunctuationGivenCommonPrefix() { ",\"rule1\",\"Accept\",\"\",\"\",\"\",\"0000000000000000\"" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); Collection adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); assertEquals("\",", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -321,7 +425,8 @@ public void testAdjustForPunctuationGivenNoCommonPrefix() { "was added by 'User1'(id:2) to servergroup 'GAME'(id:9)" ); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, NOOP_TIMEOUT_CHECKER); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, snippets, null, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); Collection 
adjustedSnippets = grokPatternCreator.adjustForPunctuation(snippets); assertEquals("", grokPatternCreator.getOverallGrokPatternBuilder().toString()); @@ -346,18 +451,61 @@ public void testValidateFullLineGrokPatternGivenValid() { "559550912603512850\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + "Info\tsshd\tsubsystem request for sftp"); + Map mappings = new HashMap<>(); + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), + NOOP_TIMEOUT_CHECKER); + + grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); + assertEquals(9, mappings.size()); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("local_timestamp")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); + } + + public void testValidateFullLineGrokPatternGivenValidAndCustomDefinition() { + + String timestampField = "local_timestamp"; + String grokPattern = "%{INT:serial_no}\\t%{CUSTOM_TIMESTAMP:local_timestamp}\\t%{TIMESTAMP_ISO8601:utc_timestamp}\\t" + + "%{INT:user_id}\\t%{HOSTNAME:host}\\t%{IP:client_ip}\\t%{WORD:method}\\t%{LOGLEVEL:severity}\\t%{PROG:program}\\t" + + "%{GREEDYDATA:message}"; + + // Two timestamps: one local, one UTC + Collection sampleMessages = Arrays.asList( + "559550912540598297\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t4/20/2016 2:06PM\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + Map mappings = new HashMap<>(); GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + Collections.singletonMap("CUSTOM_TIMESTAMP", "%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}(?:AM|PM)"), NOOP_TIMEOUT_CHECKER); grokPatternCreator.validateFullLineGrokPattern(grokPattern, timestampField); assertEquals(9, mappings.size()); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("serial_no")); - 
assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date"), mappings.get("local_timestamp")); + Map expectedDateMapping = new HashMap<>(); + expectedDateMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); + expectedDateMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "iso8601"); + assertEquals(expectedDateMapping, mappings.get("utc_timestamp")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("user_id")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("host")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "ip"), mappings.get("client_ip")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("method")); + assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("severity")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("program")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("message")); } @@ -376,7 +524,7 @@ public void testValidateFullLineGrokPatternGivenInvalid() { "Sep 8 11:55:42 linux named[22529]: error (unexpected RCODE REFUSED) resolving 'b.akamaiedge.net/A/IN': 95.110.64.205#53"); Map mappings = new HashMap<>(); - GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, + GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, null, Collections.emptyMap(), NOOP_TIMEOUT_CHECKER); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index 6cf4d61cf176c..6ac672f61780e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -5,11 +5,9 @@ */ package org.elasticsearch.xpack.ml.filestructurefinder; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; -import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Collections; import java.util.Set; @@ -20,90 +18,6 @@ public class TextLogFileStructureFinderTests extends FileStructureTestCase { - private static final String EXCEPTION_TRACE_SAMPLE = - "[2018-02-28T14:49:40,517][DEBUG][o.e.a.b.TransportShardBulkAction] [an_index][2] failed to execute bulk item " + - "(index) BulkShardRequest [[an_index][2]] containing [33] requests\n" + - "java.lang.IllegalArgumentException: Document contains at least one immense term in field=\"message.keyword\" (whose UTF8 " + - "encoding is longer than the max length 32766), all of which were skipped. Please correct the analyzer to not produce " + - "such terms. 
The prefix of the first immense term is: '[60, 83, 79, 65, 80, 45, 69, 78, 86, 58, 69, 110, 118, 101, 108, " + - "111, 112, 101, 32, 120, 109, 108, 110, 115, 58, 83, 79, 65, 80, 45]...', original message: bytes can be at most 32766 " + - "in length; got 49023\n" + - "\tat org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:796) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:430) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:392) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DocumentsWriterPerThread.updateDocument(DocumentsWriterPerThread.java:240) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.DocumentsWriter.updateDocument(DocumentsWriter.java:496) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1729) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1464) " + - "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + - "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:1070) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.engine.InternalEngine.indexIntoLucene(InternalEngine.java:1012) " + - "~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:878) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:738) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperation(IndexShard.java:707) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperationOnPrimary(IndexShard.java:673) " + - "~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnPrimary(TransportShardBulkAction.java:548) " + - "~[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequest(TransportShardBulkAction.java:140) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeBulkItemRequest(TransportShardBulkAction.java:236) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.performOnPrimary(TransportShardBulkAction.java:123) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:110) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:72) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat 
org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + - "(TransportReplicationAction.java:1034) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + - "(TransportReplicationAction.java:1012) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:103) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + - "(TransportReplicationAction.java:359) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + - "(TransportReplicationAction.java:299) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + - "(TransportReplicationAction.java:975) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + - "(TransportReplicationAction.java:972) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShardOperationPermits.acquire(IndexShardOperationPermits.java:238) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationPermit(IndexShard.java:2220) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference" + - "(TransportReplicationAction.java:984) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.access$500(TransportReplicationAction.java:98) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun" + - "(TransportReplicationAction.java:320) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + - ".messageReceived(TransportReplicationAction.java:295) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + - ".messageReceived(TransportReplicationAction.java:282) [elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.transport.TransportService$7.doRun(TransportService.java:656) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:635) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + - "[elasticsearch-6.2.1.jar:6.2.1]\n" + - "\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_144]\n" + - "\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_144]\n" + - "\tat java.lang.Thread.run(Thread.java:748) [?:1.8.0_144]\n"; - 
private FileStructureFinderFactory factory = new TextLogFileStructureFinderFactory(); public void testCreateConfigsGivenElasticsearchLog() throws Exception { @@ -124,7 +38,7 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertNull(structure.getExcludeLinesPattern()); - assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", structure.getMultilineStartPattern()); + assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertNull(structure.getDelimiter()); assertNull(structure.getQuote()); assertNull(structure.getHasHeaderRow()); @@ -139,6 +53,47 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { } } + public void testCreateConfigsGivenElasticsearchLogAndTimestampFormatOverride() throws Exception { + + String sample = "12/31/2018 1:40PM INFO foo\n" + + "1/31/2019 11:40AM DEBUG bar\n" + + "2/1/2019 11:00PM INFO foo\n" + + "2/2/2019 1:23AM DEBUG bar\n"; + + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("M/d/yyyy h:mma").build(); + + assertTrue(factory.canCreateFromSample(explanation, sample)); + + String charset = randomFrom(POSSIBLE_CHARSETS); + Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); + FileStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, + NOOP_TIMEOUT_CHECKER); + + FileStructure structure = structureFinder.getStructure(); + + assertEquals(FileStructure.Format.SEMI_STRUCTURED_TEXT, structure.getFormat()); + assertEquals(charset, structure.getCharset()); + if (hasByteOrderMarker == null) { + assertNull(structure.getHasByteOrderMarker()); + } else { + assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); + } + assertNull(structure.getExcludeLinesPattern()); + assertEquals("^\\d{1,2}/\\d{1,2}/\\d{4} \\d{1,2}:\\d{2}[AP]M\\b", structure.getMultilineStartPattern()); + assertNull(structure.getDelimiter()); + assertNull(structure.getQuote()); + assertNull(structure.getHasHeaderRow()); + assertNull(structure.getShouldTrimFields()); + assertEquals("%{CUSTOM_TIMESTAMP:timestamp} %{LOGLEVEL:loglevel} .*", structure.getGrokPattern()); + assertEquals("timestamp", structure.getTimestampField()); + assertEquals(Collections.singletonList("M/d/YYYY h:mma"), structure.getJodaTimestampFormats()); + FieldStats messageFieldStats = structure.getFieldStats().get("message"); + assertNotNull(messageFieldStats); + for (String statMessage : messageFieldStats.getTopHits().stream().map(m -> (String) m.get("value")).collect(Collectors.toList())) { + assertThat(structureFinder.getSampleMessages(), hasItem(statMessage)); + } + } + public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() throws Exception { FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampField("my_time").build(); @@ -160,7 +115,7 @@ public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() th assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertNull(structure.getExcludeLinesPattern()); - assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", structure.getMultilineStartPattern()); + assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertNull(structure.getDelimiter()); assertNull(structure.getQuote()); assertNull(structure.getHasHeaderRow()); @@ -197,7 +152,7 @@ public 
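// A standalone check (not part of this patch) of the timestamp format override used in
// the new test above: 'h' is clock-hour-of-am-pm, so single-digit hours such as "1:40PM"
// and "11:00PM" both parse with the same pattern.
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

class TimestampOverrideSketch {
    public static void main(String[] args) {
        DateTimeFormatter f = DateTimeFormatter.ofPattern("M/d/yyyy h:mma", Locale.ENGLISH);
        for (String ts : new String[] {"12/31/2018 1:40PM", "1/31/2019 11:40AM", "2/1/2019 11:00PM", "2/2/2019 1:23AM"}) {
            System.out.println(LocalDateTime.parse(ts, f));
        }
    }
}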
void testCreateConfigsGivenElasticsearchLogAndGrokPatternOverride() throw assertEquals(hasByteOrderMarker, structure.getHasByteOrderMarker()); } assertNull(structure.getExcludeLinesPattern()); - assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", structure.getMultilineStartPattern()); + assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", structure.getMultilineStartPattern()); assertNull(structure.getDelimiter()); assertNull(structure.getQuote()); assertNull(structure.getHasHeaderRow()); @@ -303,98 +258,4 @@ public void testCreateMultiLineMessageStartRegexGivenManyPrefacesIncludingEmpty( TextLogFileStructureFinder.createMultiLineMessageStartRegex(prefaces, simpleDateRegex)); } } - - public void testMostLikelyTimestampGivenAllSame() { - String sample = "[2018-06-27T11:59:22,125][INFO ][o.e.n.Node ] [node-0] initializing ...\n" + - "[2018-06-27T11:59:22,201][INFO ][o.e.e.NodeEnvironment ] [node-0] using [1] data paths, mounts [[/ (/dev/disk1)]], " + - "net usable_space [216.1gb], net total_space [464.7gb], types [hfs]\n" + - "[2018-06-27T11:59:22,202][INFO ][o.e.e.NodeEnvironment ] [node-0] heap size [494.9mb], " + - "compressed ordinary object pointers [true]\n" + - "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] node name [node-0], node ID [Ha1gD8nNSDqjd6PIyu3DJA]\n" + - "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] version[6.4.0-SNAPSHOT], pid[2785], " + - "build[default/zip/3c60efa/2018-06-26T14:55:15.206676Z], OS[Mac OS X/10.12.6/x86_64], " + - "JVM[\"Oracle Corporation\"/Java HotSpot(TM) 64-Bit Server VM/10/10+46]\n" + - "[2018-06-27T11:59:22,205][INFO ][o.e.n.Node ] [node-0] JVM arguments [-Xms1g, -Xmx1g, " + - "-XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, " + - "-XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, " + - "-XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, " + - "-Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, " + - "-Djava.io.tmpdir=/var/folders/k5/5sqcdlps5sg3cvlp783gcz740000h0/T/elasticsearch.nFUyeMH1, " + - "-XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, " + - "-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, " + - "-Djava.locale.providers=COMPAT, -Dio.netty.allocator.type=unpooled, -ea, -esa, -Xms512m, -Xmx512m, " + - "-Des.path.home=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT, " + - "-Des.path.conf=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT/config, " + - "-Des.distribution.flavor=default, -Des.distribution.type=zip]\n" + - "[2018-06-27T11:59:22,205][WARN ][o.e.n.Node ] [node-0] version [6.4.0-SNAPSHOT] is a pre-release version of " + - "Elasticsearch and is not suitable for production\n" + - "[2018-06-27T11:59:23,585][INFO ][o.e.p.PluginsService ] [node-0] loaded module [aggs-matrix-stats]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [analysis-common]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [ingest-common]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-expression]\n" + - "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-mustache]\n" + - "[2018-06-27T11:59:23,586][INFO 
][o.e.p.PluginsService ] [node-0] loaded module [lang-painless]\n" +
-            "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [mapper-extras]\n" +
-            "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [parent-join]\n" +
-            "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [percolator]\n" +
-            "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [rank-eval]\n" +
-            "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [reindex]\n" +
-            "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [repository-url]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [transport-netty4]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-core]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-deprecation]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-graph]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-logstash]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-ml]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-monitoring]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-rollup]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-security]\n" +
-            "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-sql]\n" +
-            "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-upgrade]\n" +
-            "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-watcher]\n" +
-            "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] no plugins loaded\n";
-
-        Tuple<TimestampMatch, List<String>> mostLikelyMatch =
-            TextLogFileStructureFinder.mostLikelyTimestamp(sample.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES,
-                NOOP_TIMEOUT_CHECKER);
-        assertNotNull(mostLikelyMatch);
-        assertEquals(new TimestampMatch(9, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1());
-    }
-
-    public void testMostLikelyTimestampGivenExceptionTrace() {
-
-        Tuple<TimestampMatch, List<String>> mostLikelyMatch =
-            TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES,
-                NOOP_TIMEOUT_CHECKER);
-        assertNotNull(mostLikelyMatch);
-
-        // Even though many lines have a timestamp near the end (in the Lucene version information),
-        // these are so far along the lines that the weight of the timestamp near the beginning of the
-        // first line should take precedence
-        assertEquals(new TimestampMatch(9, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1());
-    }
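The two deleted tests above, and the two that follow, drive the old one-shot API: a static mostLikelyTimestamp that scored every candidate in a single pass and returned a Tuple<TimestampMatch, List<String>>. The added tests later in this patch drive its stateful replacement, which accumulates samples before committing to a format. The following is a minimal sketch of that flow, using only calls that appear in the added tests; the sample strings are arbitrary, and the three boolean constructor flags are passed exactly as the tests pass them (their individual meanings are not visible in this hunk):

    TimestampFormatFinder finder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER);
    finder.addSample("[2018-06-27T11:59:22,125][INFO ][o.e.n.Node] [node-0] initializing ...");
    finder.addSample("[2018-06-27T11:59:22,201][INFO ][o.e.e.NodeEnvironment] [node-0] using [1] data paths");
    finder.selectBestMatch();                                    // commit to the best candidate seen across all samples
    String grokName = finder.getGrokPatternName();               // "TIMESTAMP_ISO8601" for samples like these
    List<String> javaFormats = finder.getJavaTimestampFormats(); // e.g. ["ISO8601"]
    Pattern simplePattern = finder.getSimplePattern();           // simplified regex for cheap line matching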
-
-    public void testMostLikelyTimestampGivenExceptionTraceAndTimestampFormatOverride() {
-
-        FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("yyyy-MM-dd HH:mm:ss").build();
-
-        Tuple<TimestampMatch, List<String>> mostLikelyMatch =
-            TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER);
-        assertNotNull(mostLikelyMatch);
-
-        // The override should force the seemingly inferior choice of timestamp
-        assertEquals(new TimestampMatch(6, "", "YYYY-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss", "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}",
-            "TIMESTAMP_ISO8601", ""), mostLikelyMatch.v1());
-    }
-
-    public void testMostLikelyTimestampGivenExceptionTraceAndImpossibleTimestampFormatOverride() {
-
-        FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("MMM dd HH:mm:ss").build();
-
-        Tuple<TimestampMatch, List<String>> mostLikelyMatch =
-            TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER);
-        assertNull(mostLikelyMatch);
-    }
 }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java
index 0374ed6f34175..b80e8a5712aaa 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java
@@ -6,9 +6,7 @@
 package org.elasticsearch.xpack.ml.filestructurefinder;
 
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.time.DateFormatter;
-import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch;
 
 import java.time.Instant;
 import java.time.ZoneId;
@@ -18,306 +16,981 @@
 import java.time.temporal.TemporalAccessor;
 import java.time.temporal.TemporalQueries;
 import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
+import java.util.regex.Pattern;
 
 public class TimestampFormatFinderTests extends FileStructureTestCase {
 
-    public void testFindFirstMatchGivenNoMatch() {
-
-        assertNull(TimestampFormatFinder.findFirstMatch("", NOOP_TIMEOUT_CHECKER));
-        assertNull(TimestampFormatFinder.findFirstMatch("no timestamps in here", NOOP_TIMEOUT_CHECKER));
-        assertNull(TimestampFormatFinder.findFirstMatch(":::", NOOP_TIMEOUT_CHECKER));
-        assertNull(TimestampFormatFinder.findFirstMatch("/+", NOOP_TIMEOUT_CHECKER));
-    }
-
-    public void testFindFirstMatchGivenOnlyIso8601() {
-
-        validateTimestampMatch(new TimestampMatch(7, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXX",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T16:14:56,374Z",
-            1526400896374L);
-        validateTimestampMatch(new TimestampMatch(7, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXX",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T17:14:56,374+0100",
-            1526400896374L);
-        validateTimestampMatch(new TimestampMatch(8, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSSXXX",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T17:14:56,374+01:00",
-            1526400896374L);
-        validateTimestampMatch(new TimestampMatch(9, "", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15T17:14:56,374", 1526400896374L);
-
-        TimestampMatch pureIso8601Expected = new TimestampMatch(10, "", "ISO8601", "ISO8601",
-            "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", "");
-
-        validateTimestampMatch(pureIso8601Expected, "2018-05-15T16:14:56Z", 1526400896000L);
-        validateTimestampMatch(pureIso8601Expected, "2018-05-15T17:14:56+0100", 1526400896000L);
-        
validateTimestampMatch(pureIso8601Expected, "2018-05-15T17:14:56+01:00", 1526400896000L); - validateTimestampMatch(pureIso8601Expected, "2018-05-15T17:14:56", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(1, "", "YYYY-MM-dd HH:mm:ss,SSSZ", "yyyy-MM-dd HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 16:14:56,374Z", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(1, "", "YYYY-MM-dd HH:mm:ss,SSSZ", "yyyy-MM-dd HH:mm:ss,SSSXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56,374+0100", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(2, "", "YYYY-MM-dd HH:mm:ss,SSSZZ", "yyyy-MM-dd HH:mm:ss,SSSXXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56,374+01:00", - 1526400896374L); - validateTimestampMatch(new TimestampMatch(3, "", "YYYY-MM-dd HH:mm:ss,SSS", "yyyy-MM-dd HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56,374", 1526400896374L); - validateTimestampMatch(new TimestampMatch(4, "", "YYYY-MM-dd HH:mm:ssZ", "yyyy-MM-dd HH:mm:ssXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 16:14:56Z", 1526400896000L); - validateTimestampMatch(new TimestampMatch(4, "", "YYYY-MM-dd HH:mm:ssZ", "yyyy-MM-dd HH:mm:ssXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56+0100", 1526400896000L); - validateTimestampMatch(new TimestampMatch(5, "", "YYYY-MM-dd HH:mm:ssZZ", "yyyy-MM-dd HH:mm:ssXXX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56+01:00", 1526400896000L); - validateTimestampMatch(new TimestampMatch(6, "", "YYYY-MM-dd HH:mm:ss", "yyyy-MM-dd HH:mm:ss", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", "TIMESTAMP_ISO8601", ""), "2018-05-15 17:14:56", 1526400896000L); - } - - public void testFindFirstMatchGivenOnlyKnownTimestampFormat() { + private static final String EXCEPTION_TRACE_SAMPLE = + "[2018-02-28T14:49:40,517][DEBUG][o.e.a.b.TransportShardBulkAction] [an_index][2] failed to execute bulk item " + + "(index) BulkShardRequest [[an_index][2]] containing [33] requests\n" + + "java.lang.IllegalArgumentException: Document contains at least one immense term in field=\"message.keyword\" (whose UTF8 " + + "encoding is longer than the max length 32766), all of which were skipped. Please correct the analyzer to not produce " + + "such terms. 
The prefix of the first immense term is: '[60, 83, 79, 65, 80, 45, 69, 78, 86, 58, 69, 110, 118, 101, 108, " + + "111, 112, 101, 32, 120, 109, 108, 110, 115, 58, 83, 79, 65, 80, 45]...', original message: bytes can be at most 32766 " + + "in length; got 49023\n" + + "\tat org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:796) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:430) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:392) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DocumentsWriterPerThread.updateDocument(DocumentsWriterPerThread.java:240) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.DocumentsWriter.updateDocument(DocumentsWriter.java:496) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1729) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1464) " + + "~[lucene-core-7.2.1.jar:7.2.1 b2b6438b37073bee1fca40374e85bf91aa457c0b - ubuntu - 2018-01-10 00:48:43]\n" + + "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:1070) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.engine.InternalEngine.indexIntoLucene(InternalEngine.java:1012) " + + "~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:878) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:738) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperation(IndexShard.java:707) ~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.applyIndexOperationOnPrimary(IndexShard.java:673) " + + "~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnPrimary(TransportShardBulkAction.java:548) " + + "~[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequest(TransportShardBulkAction.java:140) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.executeBulkItemRequest(TransportShardBulkAction.java:236) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.performOnPrimary(TransportShardBulkAction.java:123) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:110) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnPrimary(TransportShardBulkAction.java:72) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat 
org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + + "(TransportReplicationAction.java:1034) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryShardReference.perform" + + "(TransportReplicationAction.java:1012) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.ReplicationOperation.execute(ReplicationOperation.java:103) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + + "(TransportReplicationAction.java:359) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.onResponse" + + "(TransportReplicationAction.java:299) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + + "(TransportReplicationAction.java:975) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$1.onResponse" + + "(TransportReplicationAction.java:972) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShardOperationPermits.acquire(IndexShardOperationPermits.java:238) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.index.shard.IndexShard.acquirePrimaryOperationPermit(IndexShard.java:2220) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.acquirePrimaryShardReference" + + "(TransportReplicationAction.java:984) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction.access$500(TransportReplicationAction.java:98) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncPrimaryAction.doRun" + + "(TransportReplicationAction.java:320) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + + ".messageReceived(TransportReplicationAction.java:295) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.action.support.replication.TransportReplicationAction$PrimaryOperationTransportHandler" + + ".messageReceived(TransportReplicationAction.java:282) [elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:66) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.transport.TransportService$7.doRun(TransportService.java:656) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:635) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) " + + "[elasticsearch-6.2.1.jar:6.2.1]\n" + + "\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_144]\n" + + "\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_144]\n" + + "\tat java.lang.Thread.run(Thread.java:748) [?:1.8.0_144]\n"; + + 
public void testValidOverrideFormatToGrokAndRegex() { + + assertEquals(new Tuple<>("%{YEAR}-%{MONTHNUM2}-%{MONTHDAY}T%{HOUR}:%{MINUTE}:%{SECOND}%{ISO8601_TIMEZONE}", + "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}(?:Z|[+-]\\d{4})\\b"), + TimestampFormatFinder.overrideFormatToGrokAndRegex("yyyy-MM-dd'T'HH:mm:ss,SSSXX")); + assertEquals(new Tuple<>("%{MONTHDAY}\\.%{MONTHNUM2}\\.%{YEAR} %{HOUR}:%{MINUTE} (?:AM|PM)", + "\\b\\d{2}\\.\\d{2}\\.\\d{2} \\d{1,2}:\\d{2} [AP]M\\b"), + TimestampFormatFinder.overrideFormatToGrokAndRegex("dd.MM.yy h:mm a")); + assertEquals(new Tuple<>("%{MONTHNUM2}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", + "\\b\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b"), + TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy H:mm:ss zzz")); + } + + public void testInvalidOverrideFormatToGrokAndRegex() { + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy\nH:mm:ss zzz")); + assertEquals("Multi-line timestamp formats [MM/dd/yyyy\nH:mm:ss zzz] not supported", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/YYYY H:mm:ss zzz")); + assertEquals("Letter group [YYYY] in [MM/dd/YYYY H:mm:ss zzz] is not supported", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyy H:mm:ss zzz")); + assertEquals("Letter group [yyy] in [MM/dd/yyy H:mm:ss zzz] is not supported", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy H:mm:ss+SSSSSS")); + assertEquals("Letter group [SSSSSS] in [MM/dd/yyyy H:mm:ss+SSSSSS] is not supported" + + " because it is not preceeded by [ss] and a separator from [:.,]", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex("MM/dd/yyyy H:mm,SSSSSS")); + assertEquals("Letter group [SSSSSS] in [MM/dd/yyyy H:mm,SSSSSS] is not supported" + + " because it is not preceeded by [ss] and a separator from [:.,]", e.getMessage()); + e = expectThrows(IllegalArgumentException.class, + () -> TimestampFormatFinder.overrideFormatToGrokAndRegex(" 'T' ")); + assertEquals("No time format letter groups in override format [ 'T' ]", e.getMessage()); + } + + public void testMakeCandidateFromOverrideFormat() { + + // Override is a special format + assertSame(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("ISO8601", NOOP_TIMEOUT_CHECKER)); + assertSame(TimestampFormatFinder.UNIX_MS_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX_MS", NOOP_TIMEOUT_CHECKER)); + assertSame(TimestampFormatFinder.UNIX_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("UNIX", NOOP_TIMEOUT_CHECKER)); + assertSame(TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, + TimestampFormatFinder.makeCandidateFromOverrideFormat("TAI64N", NOOP_TIMEOUT_CHECKER)); + + // Override is covered by a built-in format + TimestampFormatFinder.CandidateTimestampFormat candidate = + TimestampFormatFinder.makeCandidateFromOverrideFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", NOOP_TIMEOUT_CHECKER); + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.outputGrokPatternName, candidate.outputGrokPatternName); + 
assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.strictGrokPattern, candidate.strictGrokPattern); + // Can't compare Grok objects as Grok doesn't implement equals() + assertEquals(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT.simplePattern.pattern(), candidate.simplePattern.pattern()); + // Exact format supplied is returned if it matches + assertEquals(Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSS"), + candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56.374")); + // Other supported formats are returned if exact format doesn't match + assertEquals(Collections.singletonList("ISO8601"), candidate.javaTimestampFormatSupplier.apply("2018-05-15T16:14:56,374")); + + // Override is supported but not covered by any built-in format + candidate = + TimestampFormatFinder.makeCandidateFromOverrideFormat("MM/dd/yyyy H:mm:ss zzz", NOOP_TIMEOUT_CHECKER); + assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); + assertEquals("%{MONTHNUM2}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); + assertEquals("\\b\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); + assertEquals(Collections.singletonList("MM/dd/yyyy H:mm:ss zzz"), + candidate.javaTimestampFormatSupplier.apply("05/15/2018 16:14:56 UTC")); + + candidate = + TimestampFormatFinder.makeCandidateFromOverrideFormat("M/d/yyyy H:mm:ss zzz", NOOP_TIMEOUT_CHECKER); + assertEquals(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, candidate.outputGrokPatternName); + assertEquals("%{MONTHNUM}/%{MONTHDAY}/%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ}", candidate.strictGrokPattern); + assertEquals("\\b\\d{1,2}/\\d{1,2}/\\d{4} \\d{1,2}:\\d{2}:\\d{2} [A-Z]{3}\\b", candidate.simplePattern.pattern()); + assertEquals(Collections.singletonList("M/d/yyyy H:mm:ss zzz"), + candidate.javaTimestampFormatSupplier.apply("5/15/2018 16:14:56 UTC")); + } + + public void testRequiresTimezoneDependentParsing() { + + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56Z")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56-0100")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("ISO8601", "2018-05-15T17:14:56+01:00")); + + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("UNIX_MS", "1526400896374")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("UNIX", "1526400896")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("TAI64N", "400000005afb078a164ac980")); + + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("EEE, dd MMM yyyy HH:mm:ss XXX", + "Tue, 15 May 2018 17:14:56 +01:00")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyyMMddHHmmss", "20180515171456")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("EEE MMM dd yy HH:mm:ss zzz", + "Tue May 15 18 16:14:56 UTC")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyy-MM-dd HH:mm:ss,SSS XX", + "2018-05-15 17:14:56,374 +0100")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("MMM dd HH:mm:ss.SSS", "May 15 17:14:56.725")); 
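+
+        // Letter groups inside single quotes are literal text rather than format
+        // letters, so the quoted 'zXz', 'z' and 'XX' in the next three cases do not
+        // count as timezone designators; only the unquoted trailing XX in the final
+        // case makes parsing independent of the local time zone.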
+ + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyy.MM.dd'zXz'HH:mm:ss", + "2018.05.15zXz17:14:56")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("yyyy.MM.dd HH:mm:ss'z'", + "2018.05.15 17:14:56z")); + assertTrue(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("'XX'yyyy.MM.dd HH:mm:ss", + "XX2018.05.15 17:14:56")); + assertFalse(TimestampFormatFinder.TimestampMatch.requiresTimezoneDependentParsing("'XX'yyyy.MM.dd HH:mm:ssXX", + "XX2018.05.15 17:14:56Z")); + } + + public void testParseIndeterminateDateNumbers() { + + // Simplest case - nothing is indeterminate + int[] indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("2018-05-15T16:14:56,374Z", + Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss,SSSXX")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(-1, indeterminateDateNumbers[0]); + assertEquals(-1, indeterminateDateNumbers[1]); + + // US with padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("05/15/2018 16:14:56", + Collections.singletonList("??/??/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(5, indeterminateDateNumbers[0]); + assertEquals(15, indeterminateDateNumbers[1]); + + // US without padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("5/15/2018 16:14:56", + Collections.singletonList("?/?/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(5, indeterminateDateNumbers[0]); + assertEquals(15, indeterminateDateNumbers[1]); + + // EU with padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("15/05/2018 16:14:56", + Collections.singletonList("??/??/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(15, indeterminateDateNumbers[0]); + assertEquals(5, indeterminateDateNumbers[1]); + + // EU without padding + indeterminateDateNumbers = + TimestampFormatFinder.TimestampMatch.parseIndeterminateDateNumbers("15/5/2018 16:14:56", + Collections.singletonList("?/?/yyyy HH:mm:ss")); + assertEquals(2, indeterminateDateNumbers.length); + assertEquals(15, indeterminateDateNumbers[0]); + assertEquals(5, indeterminateDateNumbers[1]); + } + + public void testDeterminiseJavaTimestampFormat() { + + // Indeterminate at the beginning of the pattern + assertEquals("dd/MM/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("??/??/yyyy HH:mm:ss", true)); + assertEquals("MM/dd/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("??/??/yyyy HH:mm:ss", false)); + assertEquals("d/M/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("?/?/yyyy HH:mm:ss", true)); + assertEquals("M/d/yyyy HH:mm:ss", TimestampFormatFinder.determiniseJavaTimestampFormat("?/?/yyyy HH:mm:ss", false)); + // Indeterminate in the middle of the pattern + assertEquals("HH:mm:ss dd/MM/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ??/??/yyyy", true)); + assertEquals("HH:mm:ss MM/dd/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ??/??/yyyy", false)); + assertEquals("HH:mm:ss d/M/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ?/?/yyyy", true)); + assertEquals("HH:mm:ss M/d/yyyy", TimestampFormatFinder.determiniseJavaTimestampFormat("HH:mm:ss ?/?/yyyy", false)); + // No separators + 
assertEquals("ddMMyyyyHHmmss", TimestampFormatFinder.determiniseJavaTimestampFormat("????yyyyHHmmss", true)); + assertEquals("MMddyyyyHHmmss", TimestampFormatFinder.determiniseJavaTimestampFormat("????yyyyHHmmss", false)); + // It's unreasonable to expect a variable length format like 'd' or 'M' to work without separators + } + + public void testGuessIsDayFirstFromFormats() { + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/5/2018 16:14:56"); + timestampFormatFinder.addSample("06/6/2018 17:14:56"); + timestampFormatFinder.addSample("07/7/2018 18:14:56"); + + // This is based on the fact that %{MONTHNUM} can match a single digit whereas %{MONTHDAY} cannot + assertTrue(timestampFormatFinder.guessIsDayFirstFromFormats(timestampFormatFinder.getRawJavaTimestampFormats())); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("5/05/2018 16:14:56"); + timestampFormatFinder.addSample("6/06/2018 17:14:56"); + timestampFormatFinder.addSample("7/07/2018 18:14:56"); + + // This is based on the fact that %{MONTHNUM} can match a single digit whereas %{MONTHDAY} cannot + assertFalse(timestampFormatFinder.guessIsDayFirstFromFormats(timestampFormatFinder.getRawJavaTimestampFormats())); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("5/05/2018 16:14:56"); + timestampFormatFinder.addSample("06/6/2018 17:14:56"); + timestampFormatFinder.addSample("7/07/2018 18:14:56"); + + // Inconsistent so no decision + assertNull(timestampFormatFinder.guessIsDayFirstFromFormats(timestampFormatFinder.getRawJavaTimestampFormats())); + } + + public void testGuessIsDayFirstFromMatchesSingleFormat() { + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("05/15/2018 17:14:56"); + timestampFormatFinder.addSample("05/25/2018 18:14:56"); + + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("15/05/2018 17:14:56"); + timestampFormatFinder.addSample("25/05/2018 18:14:56"); + + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("05/06/2018 17:14:56"); + timestampFormatFinder.addSample("05/07/2018 18:14:56"); + + // Second number has 3 values, first only 1, so guess second is day + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("06/05/2018 17:14:56"); + timestampFormatFinder.addSample("07/05/2018 
18:14:56"); + + // First number has 3 values, second only 1, so guess first is day + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("06/06/2018 17:14:56"); + timestampFormatFinder.addSample("07/07/2018 18:14:56"); + + // Insufficient evidence to decide + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(null)); + } + + public void testGuessIsDayFirstFromMatchesMultipleFormats() { + + // Similar to the test above, but with the possibility that the secondary + // ISO8601 formats cause confusion - this test proves that they don't + + TimestampFormatFinder.TimestampFormat expectedPrimaryFormat = + new TimestampFormatFinder.TimestampFormat(Collections.singletonList("??/??/yyyy HH:mm:ss"), + Pattern.compile("\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b"), "DATESTAMP", Collections.emptyMap(), ""); + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-15T17:14:56"); + timestampFormatFinder.addSample("05/15/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-25T18:14:56"); + timestampFormatFinder.addSample("05/25/2018 18:14:56"); + + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-15T17:14:56"); + timestampFormatFinder.addSample("15/05/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-25T18:14:56"); + timestampFormatFinder.addSample("25/05/2018 18:14:56"); + + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-06T17:14:56"); + timestampFormatFinder.addSample("05/06/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-07T18:14:56"); + timestampFormatFinder.addSample("05/07/2018 18:14:56"); + + // Second number has 3 values, first only 1, so guess second is day + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-05-06T17:14:56"); + timestampFormatFinder.addSample("06/05/2018 17:14:56"); + timestampFormatFinder.addSample("2018-05-07T18:14:56"); + timestampFormatFinder.addSample("07/05/2018 18:14:56"); + + // First number has 3 values, second only 1, so guess first is day + 
assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + + timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, false, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.addSample("05/05/2018 16:14:56"); + timestampFormatFinder.addSample("2018-06-06T17:14:56"); + timestampFormatFinder.addSample("06/06/2018 17:14:56"); + timestampFormatFinder.addSample("2018-07-07T18:14:56"); + timestampFormatFinder.addSample("07/07/2018 18:14:56"); + + // Insufficient evidence to decide + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + assertNull(timestampFormatFinder.guessIsDayFirstFromMatches(expectedPrimaryFormat)); + } + + public void testGuessIsDayFirstFromLocale() { + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + + // Locale fallback is the only way to decide + assertFalse(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.US)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.UK)); + assertTrue(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.FRANCE)); + assertFalse(timestampFormatFinder.guessIsDayFirstFromLocale(Locale.JAPAN)); + } + + public void testStringToNumberPosBitSet() { + + BitSet bitSet = TimestampFormatFinder.stringToNumberPosBitSet(""); + assertTrue(bitSet.isEmpty()); + assertEquals(0, bitSet.length()); + + bitSet = TimestampFormatFinder.stringToNumberPosBitSet(" 1"); + assertEquals(2, bitSet.length()); + assertFalse(bitSet.get(0)); + assertTrue(bitSet.get(1)); + + bitSet = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + assertEquals(5, bitSet.length()); + assertTrue(bitSet.get(0)); + assertFalse(bitSet.get(1)); + assertTrue(bitSet.get(2)); + assertFalse(bitSet.get(3)); + assertTrue(bitSet.get(4)); + + bitSet = TimestampFormatFinder.stringToNumberPosBitSet("05/05/2018 16:14:56"); + assertEquals(19, bitSet.length()); + assertTrue(bitSet.get(0)); + assertTrue(bitSet.get(1)); + assertFalse(bitSet.get(2)); + assertTrue(bitSet.get(3)); + assertTrue(bitSet.get(4)); + assertFalse(bitSet.get(5)); + assertTrue(bitSet.get(6)); + assertTrue(bitSet.get(7)); + assertTrue(bitSet.get(8)); + assertTrue(bitSet.get(9)); + assertFalse(bitSet.get(10)); + assertTrue(bitSet.get(11)); + assertTrue(bitSet.get(12)); + assertFalse(bitSet.get(13)); + assertTrue(bitSet.get(14)); + assertTrue(bitSet.get(15)); + assertFalse(bitSet.get(16)); + assertTrue(bitSet.get(17)); + assertTrue(bitSet.get(18)); + } + + public void testFindBitPattern() { + + BitSet findIn = TimestampFormatFinder.stringToNumberPosBitSet(""); + BitSet toFind = TimestampFormatFinder.stringToNumberPosBitSet(""); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(""); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet("1"); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, 
TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(" 1"); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(3, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet("1 1"); + assertEquals(0, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 3, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 11 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet("11 1"); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(2, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 3, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 11 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(" 11 1"); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(1, TimestampFormatFinder.findBitPattern(findIn, 1, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 2, toFind)); + + findIn = TimestampFormatFinder.stringToNumberPosBitSet("1 11 1 1"); + toFind = TimestampFormatFinder.stringToNumberPosBitSet(" 1 1"); + assertEquals(4, TimestampFormatFinder.findBitPattern(findIn, 0, toFind)); + assertEquals(4, TimestampFormatFinder.findBitPattern(findIn, 4, toFind)); + assertEquals(-1, TimestampFormatFinder.findBitPattern(findIn, 5, toFind)); + } + + public void testFindBoundsForCandidate() { + + final TimestampFormatFinder.CandidateTimestampFormat httpdCandidateFormat = TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS + .stream().filter(candidate -> candidate.outputGrokPatternName.equals("HTTPDATE")).findAny().get(); + + BitSet numberPosBitSet = TimestampFormatFinder.stringToNumberPosBitSet("[2018-05-11T17:07:29,553][INFO ]" + + "[o.e.e.NodeEnvironment ] [node-0] heap size [3.9gb], compressed ordinary object pointers [true]"); + assertEquals(new Tuple<>(1, 36), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, numberPosBitSet)); + assertEquals(new Tuple<>(-1, -1), TimestampFormatFinder.findBoundsForCandidate(httpdCandidateFormat, numberPosBitSet)); + // TAI64N doesn't necessarily contain digits, so this functionality cannot guarantee that it won't match somewhere in the text + assertEquals(new Tuple<>(0, Integer.MAX_VALUE), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, numberPosBitSet)); + + numberPosBitSet = TimestampFormatFinder.stringToNumberPosBitSet("192.168.62.101 - - [29/Jun/2016:12:11:31 +0000] " + + "\"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384"); + assertEquals(new Tuple<>(-1, -1), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.ISO8601_CANDIDATE_FORMAT, numberPosBitSet)); + assertEquals(new Tuple<>(20, 46), TimestampFormatFinder.findBoundsForCandidate(httpdCandidateFormat, 
numberPosBitSet)); + assertEquals(new Tuple<>(0, Integer.MAX_VALUE), + TimestampFormatFinder.findBoundsForCandidate(TimestampFormatFinder.TAI64N_CANDIDATE_FORMAT, numberPosBitSet)); + } + + public void testFindFormatGivenNoMatch() { + + validateNoTimestampMatch(""); + validateNoTimestampMatch("no timestamps in here"); + validateNoTimestampMatch(":::"); + validateNoTimestampMatch("/+"); + } + + public void testFindFormatGivenOnlyIso8601() { + + validateTimestampMatch("2018-05-15T16:14:56,374Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + validateTimestampMatch("2018-05-15T17:14:56,374+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + validateTimestampMatch("2018-05-15T17:14:56,374+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + validateTimestampMatch("2018-05-15T17:14:56,374", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896374L); + + validateTimestampMatch("2018-05-15T16:14:56Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + validateTimestampMatch("2018-05-15T17:14:56+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + validateTimestampMatch("2018-05-15T17:14:56+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + validateTimestampMatch("2018-05-15T17:14:56", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400896000L); + + validateTimestampMatch("2018-05-15T16:14Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + validateTimestampMatch("2018-05-15T17:14+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + validateTimestampMatch("2018-05-15T17:14+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + validateTimestampMatch("2018-05-15T17:14", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "ISO8601", 1526400840000L); + + // TIMESTAMP_ISO8601 doesn't match ISO8601 if it's only a date with no time + validateTimestampMatch("2018-05-15", "CUSTOM_TIMESTAMP", "\\b\\d{4}-\\d{2}-\\d{2}\\b", "ISO8601", 1526338800000L); + + validateTimestampMatch("2018-05-15 16:14:56,374Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXX", 1526400896374L); + validateTimestampMatch("2018-05-15 17:14:56,374+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXX", 1526400896374L); + validateTimestampMatch("2018-05-15 17:14:56,374+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSSXXX", 1526400896374L); + validateTimestampMatch("2018-05-15 17:14:56,374", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss,SSS", 1526400896374L); + + validateTimestampMatch("2018-05-15 16:14:56Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXX", 1526400896000L); + validateTimestampMatch("2018-05-15 17:14:56+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXX", 1526400896000L); + validateTimestampMatch("2018-05-15 17:14:56+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ssXXX", 
1526400896000L); + validateTimestampMatch("2018-05-15 17:14:56", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm:ss", 1526400896000L); + + validateTimestampMatch("2018-05-15 16:14Z", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXX", 1526400840000L); + validateTimestampMatch("2018-05-15 17:14+0100", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXX", 1526400840000L); + validateTimestampMatch("2018-05-15 17:14+01:00", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mmXXX", 1526400840000L); + validateTimestampMatch("2018-05-15 17:14", "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", + "yyyy-MM-dd HH:mm", 1526400840000L); + } + + public void testFindFormatGivenOnlyKnownTimestampFormat() { // Note: some of the time formats give millisecond accuracy, some second accuracy and some minute accuracy - validateTimestampMatch(new TimestampMatch(0, "", "YYYY-MM-dd HH:mm:ss,SSS Z", "yyyy-MM-dd HH:mm:ss,SSS XX", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}", "TOMCAT_DATESTAMP", ""), "2018-05-15 17:14:56,374 +0100", - 1526400896374L); - - validateTimestampMatch(new TimestampMatch(11, "", "EEE MMM dd YYYY HH:mm:ss zzz", "EEE MMM dd yyyy HH:mm:ss zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2} ", "DATESTAMP_RFC822", ""), - "Tue May 15 2018 16:14:56 UTC", 1526400896000L); - validateTimestampMatch(new TimestampMatch(12, "", "EEE MMM dd YYYY HH:mm zzz", "EEE MMM dd yyyy HH:mm zzz", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{4} \\d{2}:\\d{2} ", "DATESTAMP_RFC822", ""), - "Tue May 15 2018 16:14 UTC", 1526400840000L); - - validateTimestampMatch(new TimestampMatch(13, "", "EEE, dd MMM YYYY HH:mm:ss ZZ", "EEE, dd MMM yyyy HH:mm:ss XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), - "Tue, 15 May 2018 17:14:56 +01:00", 1526400896000L); - validateTimestampMatch(new TimestampMatch(14, "", "EEE, dd MMM YYYY HH:mm:ss Z", "EEE, dd MMM yyyy HH:mm:ss XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), - "Tue, 15 May 2018 17:14:56 +0100", 1526400896000L); - validateTimestampMatch(new TimestampMatch(15, "", "EEE, dd MMM YYYY HH:mm ZZ", "EEE, dd MMM yyyy HH:mm XXX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), - "Tue, 15 May 2018 17:14 +01:00", 1526400840000L); - validateTimestampMatch(new TimestampMatch(16, "", "EEE, dd MMM YYYY HH:mm Z", "EEE, dd MMM yyyy HH:mm XX", - "\\b[A-Z]\\S{2,8}, \\d{1,2} [A-Z]\\S{2,8} \\d{4} \\d{2}:\\d{2} ", "DATESTAMP_RFC2822", ""), "Tue, 15 May 2018 17:14 +0100", - 1526400840000L); - - validateTimestampMatch(new TimestampMatch(17, "", "EEE MMM dd HH:mm:ss zzz YYYY", "EEE MMM dd HH:mm:ss zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", "DATESTAMP_OTHER", ""), - "Tue May 15 16:14:56 UTC 2018", 1526400896000L); - validateTimestampMatch(new TimestampMatch(18, "", "EEE MMM dd HH:mm zzz YYYY", "EEE MMM dd HH:mm zzz yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2} [A-Z]{3,4} \\d{4}\\b", "DATESTAMP_OTHER", ""), - "Tue May 15 16:14 UTC 2018", 1526400840000L); - - validateTimestampMatch(new TimestampMatch(19, "", "YYYYMMddHHmmss", "yyyyMMddHHmmss", "\\b\\d{14}\\b", - "DATESTAMP_EVENTLOG", ""), - "20180515171456", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(20, "", 
"EEE MMM dd HH:mm:ss YYYY", "EEE MMM dd HH:mm:ss yyyy", - "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", "HTTPDERROR_DATE", ""), - "Tue May 15 17:14:56 2018", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(21, "", Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), - Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", "SYSLOGTIMESTAMP", ""), "May 15 17:14:56.725", 1526400896725L); - validateTimestampMatch(new TimestampMatch(22, "", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "SYSLOGTIMESTAMP", ""), "May 15 17:14:56", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(23, "", "dd/MMM/YYYY:HH:mm:ss Z", "dd/MMM/yyyy:HH:mm:ss XX", - "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", "HTTPDATE", ""), "15/May/2018:17:14:56 +0100", 1526400896000L); - - validateTimestampMatch(new TimestampMatch(24, "", "MMM dd, YYYY h:mm:ss a", "MMM dd, yyyy h:mm:ss a", - "\\b[A-Z]\\S{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "CATALINA_DATESTAMP", ""), "May 15, 2018 5:14:56 PM", - 1526400896000L); - - validateTimestampMatch(new TimestampMatch(25, "", Arrays.asList("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss"), - Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", "CISCOTIMESTAMP", ""), "May 15 2018 17:14:56", - 1526400896000L); - } - - public void testFindFirstMatchGivenOnlySystemDate() { - - assertEquals(new TimestampMatch(26, "", "UNIX_MS", "UNIX_MS", "\\b\\d{13}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstMatch("1526400896374", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(26, "", "UNIX_MS", "UNIX_MS", "\\b\\d{13}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstFullMatch("1526400896374", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(27, "", "UNIX", "UNIX", "\\b\\d{10}\\.\\d{3,9}\\b", "NUMBER", ""), - TimestampFormatFinder.findFirstMatch("1526400896.736", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(27, "", "UNIX", "UNIX", "\\b\\d{10}\\.\\d{3,9}\\b", "NUMBER", ""), - TimestampFormatFinder.findFirstFullMatch("1526400896.736", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(28, "", "UNIX", "UNIX", "\\b\\d{10}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstMatch("1526400896", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(28, "", "UNIX", "UNIX", "\\b\\d{10}\\b", "POSINT", ""), - TimestampFormatFinder.findFirstFullMatch("1526400896", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(29, "", "TAI64N", "TAI64N", "\\b[0-9A-Fa-f]{24}\\b", "BASE16NUM", ""), - TimestampFormatFinder.findFirstMatch("400000005afb159a164ac980", NOOP_TIMEOUT_CHECKER)); - assertEquals(new TimestampMatch(29, "", "TAI64N", "TAI64N", "\\b[0-9A-Fa-f]{24}\\b", "BASE16NUM", ""), - TimestampFormatFinder.findFirstFullMatch("400000005afb159a164ac980", NOOP_TIMEOUT_CHECKER)); - } - - public void testFindFirstMatchGivenRealLogMessages() { - - assertEquals(new TimestampMatch(9, "[", "ISO8601", "yyyy-MM-dd'T'HH:mm:ss,SSS", - "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2},\\d{3}", "TIMESTAMP_ISO8601", - "][INFO ][o.e.e.NodeEnvironment ] [node-0] heap size [3.9gb], compressed ordinary object pointers [true]"), - TimestampFormatFinder.findFirstMatch("[2018-05-11T17:07:29,553][INFO ][o.e.e.NodeEnvironment ] [node-0] " + - 
"heap size [3.9gb], compressed ordinary object pointers [true]", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(23, "192.168.62.101 - - [", "dd/MMM/YYYY:HH:mm:ss Z", "dd/MMM/yyyy:HH:mm:ss XX", - "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", "HTTPDATE", - "] \"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384"), - TimestampFormatFinder.findFirstMatch("192.168.62.101 - - [29/Jun/2016:12:11:31 +0000] " + - "\"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(24, "", "MMM dd, YYYY h:mm:ss a", "MMM dd, yyyy h:mm:ss a", - "\\b[A-Z]\\S{2,8} \\d{1,2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "CATALINA_DATESTAMP", - " org.apache.tomcat.util.http.Parameters processParameters"), - TimestampFormatFinder.findFirstMatch("Aug 29, 2009 12:03:57 AM org.apache.tomcat.util.http.Parameters processParameters", - NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(22, "", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "SYSLOGTIMESTAMP", " esxi1.acme.com Vpxa: " + - "[3CB3FB90 verbose 'vpxavpxaInvtVm' opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed"), - TimestampFormatFinder.findFirstMatch("Oct 19 17:04:44 esxi1.acme.com Vpxa: [3CB3FB90 verbose 'vpxavpxaInvtVm' " + - "opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(10, "559550912540598297\t", "ISO8601", "ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", - "TIMESTAMP_ISO8601", - "\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\tInfo\tsshd\tsubsystem request for sftp"), - TimestampFormatFinder.findFirstMatch("559550912540598297\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t" + - "192.168.114.28\tAuthpriv\tInfo\tsshd\tsubsystem request for sftp", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(22, "", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), - "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", "SYSLOGTIMESTAMP", - " dnsserv named[22529]: error (unexpected RCODE REFUSED) resolving 'www.elastic.co/A/IN': 95.110.68.206#53"), - TimestampFormatFinder.findFirstMatch("Sep 8 11:55:35 dnsserv named[22529]: error (unexpected RCODE REFUSED) resolving " + - "'www.elastic.co/A/IN': 95.110.68.206#53", NOOP_TIMEOUT_CHECKER)); - - assertEquals(new TimestampMatch(3, "", "YYYY-MM-dd HH:mm:ss.SSSSSS", "yyyy-MM-dd HH:mm:ss.SSSSSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", "TIMESTAMP_ISO8601", - "|INFO |VirtualServer |1 |client 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client " + - "'User1'(id:2) in channel '3er Instanz'(id:2)"), - TimestampFormatFinder.findFirstMatch("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + - " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", - NOOP_TIMEOUT_CHECKER)); + validateTimestampMatch("2018-05-15 17:14:56,374 +0100", "TOMCAT_DATESTAMP", + "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}[:.,]\\d{3}", "yyyy-MM-dd HH:mm:ss,SSS XX", 1526400896374L); + + validateTimestampMatch("Tue May 15 18 16:14:56 UTC", "DATESTAMP_RFC822", + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{1,2} \\d{2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("EEE MMM dd yy HH:mm:ss 
zzz", "EEE MMM d yy HH:mm:ss zzz"), 1526400896000L); + + validateTimestampMatch("Tue, 15 May 2018 17:14:56 +01:00", "DATESTAMP_RFC2822", + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", "EEE, dd MMM yyyy HH:mm:ss XXX", 1526400896000L); + validateTimestampMatch("Tue, 15 May 2018 17:14:56 +0100", "DATESTAMP_RFC2822", + "\\b[A-Z]\\S{2}, \\d{1,2} [A-Z]\\S{2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", "EEE, dd MMM yyyy HH:mm:ss XX", 1526400896000L); + + validateTimestampMatch("Tue May 15 16:14:56 UTC 2018", "DATESTAMP_OTHER", + "\\b[A-Z]\\S{2,8} [A-Z]\\S{2,8} \\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("EEE MMM dd HH:mm:ss zzz yyyy", "EEE MMM d HH:mm:ss zzz yyyy"), 1526400896000L); + + validateTimestampMatch("20180515171456", "DATESTAMP_EVENTLOG", "\\b\\d{14}\\b", "yyyyMMddHHmmss", 1526400896000L); + + validateTimestampMatch("Tue May 15 17:14:56 2018", "HTTPDERROR_DATE", + "\\b[A-Z]\\S{2} [A-Z]\\S{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4}\\b", "EEE MMM dd HH:mm:ss yyyy", 1526400896000L); + + validateTimestampMatch("May 15 17:14:56.725", "SYSLOGTIMESTAMP", "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss.SSS", "MMM d HH:mm:ss.SSS"), 1526400896725L); + validateTimestampMatch("May 15 17:14:56", "SYSLOGTIMESTAMP", "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss"), 1526400896000L); + + validateTimestampMatch("15/May/2018:17:14:56 +0100", "HTTPDATE", "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", + "dd/MMM/yyyy:HH:mm:ss XX", 1526400896000L); + + validateTimestampMatch("May 15, 2018 5:14:56 PM", "CATALINA_DATESTAMP", + "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "MMM dd, yyyy h:mm:ss a", 1526400896000L); + + validateTimestampMatch("May 15 2018 17:14:56", "CISCOTIMESTAMP", "\\b[A-Z]\\S{2} {1,2}\\d{1,2} \\d{4} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss"), 1526400896000L); + + validateTimestampMatch("05/15/2018 17:14:56,374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM/dd/yyyy HH:mm:ss,SSS", 1526400896374L); + validateTimestampMatch("05-15-2018-17:14:56.374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM-dd-yyyy-HH:mm:ss.SSS", 1526400896374L); + validateTimestampMatch("15/05/2018 17:14:56.374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd/MM/yyyy HH:mm:ss.SSS", 1526400896374L); + validateTimestampMatch("15-05-2018-17:14:56,374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd-MM-yyyy-HH:mm:ss,SSS", 1526400896374L); + validateTimestampMatch("15.05.2018 17:14:56.374", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd.MM.yyyy HH:mm:ss.SSS", 1526400896374L); + validateTimestampMatch("05/15/2018 17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM/dd/yyyy HH:mm:ss", 1526400896000L); + validateTimestampMatch("05-15-2018-17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM-dd-yyyy-HH:mm:ss", 1526400896000L); + validateTimestampMatch("15/05/2018 17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd/MM/yyyy HH:mm:ss", 1526400896000L); + validateTimestampMatch("15-05-2018-17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd-MM-yyyy-HH:mm:ss", 1526400896000L); + 
validateTimestampMatch("15.05.2018 17:14:56", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "dd.MM.yyyy HH:mm:ss", 1526400896000L); + + validateTimestampMatch("05/15/2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "MM/dd/yyyy", 1526338800000L); + validateTimestampMatch("05-15-2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "MM-dd-yyyy", 1526338800000L); + validateTimestampMatch("15/05/2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "dd/MM/yyyy", 1526338800000L); + validateTimestampMatch("15-05-2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "dd-MM-yyyy", 1526338800000L); + validateTimestampMatch("15.05.2018", "DATE", "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}\\b", "dd.MM.yyyy", 1526338800000L); + } + + public void testFindFormatGivenOnlySystemDate() { + + validateTimestampMatch("1526400896374", "POSINT", "\\b\\d{13}\\b", "UNIX_MS", 1526400896374L); + + validateTimestampMatch("1526400896.736", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896736L); + validateTimestampMatch("1526400896", "NUMBER", "\\b\\d{10}\\b", "UNIX", 1526400896000L); + + validateTimestampMatch("400000005afb078a164ac980", "BASE16NUM", "\\b[0-9A-Fa-f]{24}\\b", "TAI64N", 1526400896374L); + } + + public void testCustomOverrideMatchingBuiltInFormat() { + + String overrideFormat = "yyyy-MM-dd HH:mm:ss,SSS"; + String text = "2018-05-15 17:14:56,374"; + String expectedSimpleRegex = "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}"; + String expectedGrokPatternName = "TIMESTAMP_ISO8601"; + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, true, true, true, + NOOP_TIMEOUT_CHECKER); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(Collections.emptyMap(), strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(Collections.emptyMap(), lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + public void testCustomOverrideNotMatchingBuiltInFormat() { + + String overrideFormat = "MM/dd HH.mm.ss,SSSSSS 'in' yyyy"; + String text = "05/15 17.14.56,374946 in 2018"; + String expectedSimpleRegex = "\\b\\d{2}/\\d{2} \\d{2}\\.\\d{2}\\.\\d{2},\\d{6} in \\d{4}\\b"; + String expectedGrokPatternName = "CUSTOM_TIMESTAMP"; + Map expectedCustomGrokPatternDefinitions = + Collections.singletonMap(TimestampFormatFinder.CUSTOM_TIMESTAMP_GROK_NAME, + "%{MONTHNUM2}/%{MONTHDAY} %{HOUR}\\.%{MINUTE}\\.%{SECOND} in %{YEAR}"); + + TimestampFormatFinder strictTimestampFormatFinder = new 
TimestampFormatFinder(explanation, overrideFormat, true, true, true, + NOOP_TIMEOUT_CHECKER); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedCustomGrokPatternDefinitions, strictTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, overrideFormat, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedCustomGrokPatternDefinitions, lenientTimestampFormatFinder.getCustomGrokPatternDefinitions()); + assertEquals(expectedSimpleRegex, lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(Collections.singletonList(overrideFormat), lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + public void testFindFormatGivenRealLogMessages() { + + validateFindInFullMessage("[2018-05-11T17:07:29,553][INFO ][o.e.e.NodeEnvironment ] [node-0] " + + "heap size [3.9gb], compressed ordinary object pointers [true]", "[", "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "ISO8601"); + + validateFindInFullMessage("192.168.62.101 - - [29/Jun/2016:12:11:31 +0000] " + + "\"POST //apiserv:8080/engine/v2/jobs HTTP/1.1\" 201 42 \"-\" \"curl/7.46.0\" 384", "192.168.62.101 - - [", "HTTPDATE", + "\\b\\d{2}/[A-Z]\\S{2}/\\d{4}:\\d{2}:\\d{2}:\\d{2} ", "dd/MMM/yyyy:HH:mm:ss XX"); + + validateFindInFullMessage("Aug 29, 2009 12:03:57 AM org.apache.tomcat.util.http.Parameters processParameters", "", + "CATALINA_DATESTAMP", "\\b[A-Z]\\S{2} \\d{2}, \\d{4} \\d{1,2}:\\d{2}:\\d{2} [AP]M\\b", "MMM dd, yyyy h:mm:ss a"); + + validateFindInFullMessage("Oct 19 17:04:44 esxi1.acme.com Vpxa: [3CB3FB90 verbose 'vpxavpxaInvtVm' " + + "opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", "", "SYSLOGTIMESTAMP", + "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss")); + + validateFindInFullMessage("559550912540598297\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t" + + "192.168.114.28\tAuthpriv\tInfo\tsshd\tsubsystem request for sftp", "559550912540598297\t", "TIMESTAMP_ISO8601", + "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "ISO8601"); + + validateFindInFullMessage("Sep 8 11:55:35 dnsserv named[22529]: error (unexpected RCODE REFUSED) resolving " + + "'www.elastic.co/A/IN': 95.110.68.206#53", "", "SYSLOGTIMESTAMP", "\\b[A-Z]\\S{2,8} {1,2}\\d{1,2} \\d{2}:\\d{2}:\\d{2}\\b", + Arrays.asList("MMM dd HH:mm:ss", "MMM d HH:mm:ss")); + + validateFindInFullMessage("10-28-2016 16:22:47.636 +0200 ERROR Network - " + + "Error encountered for connection from src=192.168.0.1:12345. 
Local side shutting down", "", "DATESTAMP", + "\\b\\d{1,2}[/.-]\\d{1,2}[/.-]\\d{4}[- ]\\d{2}:\\d{2}:\\d{2}\\b", "MM-dd-yyyy HH:mm:ss.SSS"); + + validateFindInFullMessage("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", "", + "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "yyyy-MM-dd HH:mm:ss.SSSSSS"); // Differs from the above as the required format is specified - assertEquals(new TimestampMatch(3, "", "YYYY-MM-dd HH:mm:ss.SSSSSS", "yyyy-MM-dd HH:mm:ss.SSSSSS", - "\\b\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\\.\\d{3}", "TIMESTAMP_ISO8601", - "|INFO |VirtualServer |1 |client 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client " + - "'User1'(id:2) in channel '3er Instanz'(id:2)"), - TimestampFormatFinder.findFirstMatch("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + - " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", - randomFrom("YYYY-MM-dd HH:mm:ss.SSSSSS", "yyyy-MM-dd HH:mm:ss.SSSSSS"), NOOP_TIMEOUT_CHECKER)); + validateFindInFullMessage("yyyy-MM-dd HH:mm:ss.SSSSSS", "2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", "", + "TIMESTAMP_ISO8601", "\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", "yyyy-MM-dd HH:mm:ss.SSSSSS"); // Non-matching required format specified - assertNull(TimestampFormatFinder.findFirstMatch("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + - " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)", - randomFrom("UNIX", "EEE MMM dd YYYY HH:mm zzz"), NOOP_TIMEOUT_CHECKER)); - } - - public void testAdjustRequiredFormat() { - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS Z", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSSSSS Z")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss,SSSSSSSSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSS")); - assertEquals("YYYY-MM-dd HH:mm:ss,SSS", TimestampFormatFinder.adjustRequiredFormat("YYYY-MM-dd HH:mm:ss.SSSSSSSSS")); - } - - public void testInterpretFractionalSeconds() { - assertEquals(new Tuple<>(',', 0), TimestampFormatFinder.interpretFractionalSeconds("Sep 8 11:55:35")); - assertEquals(new Tuple<>(',', 
0), TimestampFormatFinder.interpretFractionalSeconds("29/Jun/2016:12:11:31 +0000")); - assertEquals(new Tuple<>('.', 6), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06 17:21:25.764368")); - assertEquals(new Tuple<>(',', 9), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764363438")); - assertEquals(new Tuple<>(',', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764")); - assertEquals(new Tuple<>('.', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25.764")); - assertEquals(new Tuple<>('.', 6), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06 17:21:25.764368Z")); - assertEquals(new Tuple<>(',', 9), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764363438Z")); - assertEquals(new Tuple<>(',', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764Z")); - assertEquals(new Tuple<>('.', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25.764Z")); - assertEquals(new Tuple<>('.', 6), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06 17:21:25.764368 Z")); - assertEquals(new Tuple<>(',', 9), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764363438 Z")); - assertEquals(new Tuple<>(',', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25,764 Z")); - assertEquals(new Tuple<>('.', 3), TimestampFormatFinder.interpretFractionalSeconds("2018-01-06T17:21:25.764 Z")); - } - - private void validateTimestampMatch(TimestampMatch expected, String text, long expectedEpochMs) { - - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, expected.candidateIndex, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, expected.candidateIndex, NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch(text, Integer.MAX_VALUE, NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstFullMatch(text, Integer.MAX_VALUE, NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, randomFrom(expected.jodaTimestampFormats), NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, randomFrom(expected.jodaTimestampFormats), - NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstMatch(text, randomFrom(expected.javaTimestampFormats), NOOP_TIMEOUT_CHECKER)); - assertEquals(expected, TimestampFormatFinder.findFirstFullMatch(text, randomFrom(expected.javaTimestampFormats), - NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstMatch(text, "wrong format", NOOP_TIMEOUT_CHECKER)); - assertNull(TimestampFormatFinder.findFirstFullMatch(text, "wrong format", NOOP_TIMEOUT_CHECKER)); - - validateJodaTimestampFormats(expected.jodaTimestampFormats, text, expectedEpochMs); - validateJavaTimestampFormats(expected.javaTimestampFormats, text, expectedEpochMs); - - assertTrue(expected.simplePattern.matcher(text).find()); - } - - // This is because parsing timestamps using Joda formats generates warnings. - // Eventually we'll probably just remove the checks that the Joda formats - // are valid, and at that point this method can be removed too. - protected boolean enableWarningsCheck() { - return false; - } - - // This method is using the Joda BWC layer. 
When that's removed, this method - // can be deleted - we'll just validate the Java time formats after that. - // Also remove enableWarningsCheck() above if this method is removed. - private void validateJodaTimestampFormats(List jodaTimestampFormats, String text, long expectedEpochMs) { + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, + randomFrom("UNIX", "EEE MMM dd yyyy HH:mm zzz"), false, false, false, NOOP_TIMEOUT_CHECKER); + timestampFormatFinder.addSample("2018-01-06 19:22:20.106822|INFO |VirtualServer |1 |client " + + " 'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel '3er Instanz'(id:2)"); + assertEquals(Collections.emptyList(), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals(0, timestampFormatFinder.getNumMatchedFormats()); + } - // All the test times are for Tue May 15 2018 16:14:56 UTC, which is 17:14:56 in London. - // This is the timezone that will be used for any text representations that don't include it. - ZoneId defaultZone = ZoneId.of("Europe/London"); - long actualEpochMs; - for (int i = 0; i < jodaTimestampFormats.size(); ++i) { - try { - String timestampFormat = jodaTimestampFormats.get(i); - switch (timestampFormat) { - case "ISO8601": - actualEpochMs = Joda.forPattern("date_optional_time").withZone(defaultZone).parseMillis(text); - break; - default: - actualEpochMs = Joda.forPattern(timestampFormat).withYear(2018).withZone(defaultZone).parseMillis(text); - break; - } - if (expectedEpochMs == actualEpochMs) { - break; - } - // If the last one isn't right then propagate - if (i == jodaTimestampFormats.size() - 1) { - assertEquals(expectedEpochMs, actualEpochMs); - } - } catch (RuntimeException e) { - // If the last one throws then propagate - if (i == jodaTimestampFormats.size() - 1) { - throw e; - } - } + + public void testSelectBestMatchGivenAllSame() { + String sample = "[2018-06-27T11:59:22,125][INFO ][o.e.n.Node ] [node-0] initializing ...\n" + + "[2018-06-27T11:59:22,201][INFO ][o.e.e.NodeEnvironment ] [node-0] using [1] data paths, mounts [[/ (/dev/disk1)]], " + + "net usable_space [216.1gb], net total_space [464.7gb], types [hfs]\n" + + "[2018-06-27T11:59:22,202][INFO ][o.e.e.NodeEnvironment ] [node-0] heap size [494.9mb], " + + "compressed ordinary object pointers [true]\n" + + "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] node name [node-0], node ID [Ha1gD8nNSDqjd6PIyu3DJA]\n" + + "[2018-06-27T11:59:22,204][INFO ][o.e.n.Node ] [node-0] version[6.4.0-SNAPSHOT], pid[2785], " + + "build[default/zip/3c60efa/2018-06-26T14:55:15.206676Z], OS[Mac OS X/10.12.6/x86_64], " + + "JVM[\"Oracle Corporation\"/Java HotSpot(TM) 64-Bit Server VM/10/10+46]\n" + + "[2018-06-27T11:59:22,205][INFO ][o.e.n.Node ] [node-0] JVM arguments [-Xms1g, -Xmx1g, " + + "-XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, " + + "-XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, " + + "-XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, " + + "-Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, " + + "-Djava.io.tmpdir=/var/folders/k5/5sqcdlps5sg3cvlp783gcz740000h0/T/elasticsearch.nFUyeMH1, " + + "-XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, " + + "-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, " + + 
"-Djava.locale.providers=COMPAT, -Dio.netty.allocator.type=unpooled, -ea, -esa, -Xms512m, -Xmx512m, " + + "-Des.path.home=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT, " + + "-Des.path.conf=/Users/dave/elasticsearch/distribution/build/cluster/run node0/elasticsearch-6.4.0-SNAPSHOT/config, " + + "-Des.distribution.flavor=default, -Des.distribution.type=zip]\n" + + "[2018-06-27T11:59:22,205][WARN ][o.e.n.Node ] [node-0] version [6.4.0-SNAPSHOT] is a pre-release version of " + + "Elasticsearch and is not suitable for production\n" + + "[2018-06-27T11:59:23,585][INFO ][o.e.p.PluginsService ] [node-0] loaded module [aggs-matrix-stats]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [analysis-common]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [ingest-common]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-expression]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-mustache]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [lang-painless]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [mapper-extras]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [parent-join]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [percolator]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [rank-eval]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [reindex]\n" + + "[2018-06-27T11:59:23,586][INFO ][o.e.p.PluginsService ] [node-0] loaded module [repository-url]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [transport-netty4]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-core]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-deprecation]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-graph]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-logstash]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-ml]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-monitoring]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-rollup]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-security]\n" + + "[2018-06-27T11:59:23,587][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-sql]\n" + + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-upgrade]\n" + + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] loaded module [x-pack-watcher]\n" + + "[2018-06-27T11:59:23,588][INFO ][o.e.p.PluginsService ] [node-0] no plugins loaded\n"; + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + sample.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + timestampFormatFinder.selectBestMatch(); + assertEquals(Collections.singletonList("ISO8601"), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals("TIMESTAMP_ISO8601", 
timestampFormatFinder.getGrokPatternName()); + assertEquals("\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", timestampFormatFinder.getSimplePattern().pattern()); + for (String preface : timestampFormatFinder.getPrefaces()) { + assertEquals("[", preface); + } + assertEquals(1, timestampFormatFinder.getNumMatchedFormats()); + } + + public void testSelectBestMatchGivenExceptionTrace() { + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + EXCEPTION_TRACE_SAMPLE.split("\n"), FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + + // Even though many lines have a timestamp near the end (in the Lucene version information), + // these are so far along the lines that the weight of the timestamp near the beginning of the + // first line should take precedence + timestampFormatFinder.selectBestMatch(); + assertEquals(Collections.singletonList("ISO8601"), timestampFormatFinder.getJavaTimestampFormats()); + assertEquals("TIMESTAMP_ISO8601", timestampFormatFinder.getGrokPatternName()); + assertEquals("\\b\\d{4}-\\d{2}-\\d{2}[T ]\\d{2}:\\d{2}", timestampFormatFinder.getSimplePattern().pattern()); + for (String preface : timestampFormatFinder.getPrefaces()) { + assertEquals("[", preface); } + assertEquals(2, timestampFormatFinder.getNumMatchedFormats()); + } + + public void testSelectBestMatchGivenExceptionTraceAndTimestampFormatOverride() { + + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("yyyy-MM-dd HH:mm:ss").build(); + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); + + // The override should force the seemingly inferior choice of timestamp + // TODO - this won't work any more :-( + } + + public void testSelectBestMatchGivenExceptionTraceAndImpossibleTimestampFormatOverride() { + + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("MMM dd HH:mm:ss").build(); + + TimestampFormatFinder timestampFormatFinder = TextLogFileStructureFinder.populateTimestampFormatFinder(explanation, + EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); + + timestampFormatFinder.selectBestMatch(); + assertEquals(Collections.emptyList(), timestampFormatFinder.getJavaTimestampFormats()); + assertNull(timestampFormatFinder.getGrokPatternName()); + assertNull(timestampFormatFinder.getSimplePattern()); + assertEquals(Collections.emptyList(), timestampFormatFinder.getPrefaces()); + assertEquals(0, timestampFormatFinder.getNumMatchedFormats()); + } + + private void validateNoTimestampMatch(String text) { + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + expectThrows(IllegalArgumentException.class, () -> strictTimestampFormatFinder.addSample(text)); + assertEquals(0, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertNull(lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(0, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + private void validateTimestampMatch(String text, String expectedGrokPatternName, String expectedSimpleRegex, + String 
expectedJavaTimestampFormat, long expectedEpochMs) { + validateTimestampMatch(text, expectedGrokPatternName, expectedSimpleRegex, Collections.singletonList(expectedJavaTimestampFormat), + expectedEpochMs); + } + + private void validateTimestampMatch(String text, String expectedGrokPatternName, String expectedSimpleRegex, + List expectedJavaTimestampFormats, long expectedEpochMs) { + + Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); + assertTrue(expectedSimplePattern.matcher(text).find()); + validateJavaTimestampFormats(expectedJavaTimestampFormats, text, expectedEpochMs); + + TimestampFormatFinder strictTimestampFormatFinder = new TimestampFormatFinder(explanation, true, true, true, NOOP_TIMEOUT_CHECKER); + strictTimestampFormatFinder.addSample(text); + assertEquals(expectedGrokPatternName, strictTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedSimplePattern.pattern(), strictTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(expectedJavaTimestampFormats, strictTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, strictTimestampFormatFinder.getNumMatchedFormats()); + + TimestampFormatFinder lenientTimestampFormatFinder = new TimestampFormatFinder(explanation, false, false, false, + NOOP_TIMEOUT_CHECKER); + lenientTimestampFormatFinder.addSample(text); + lenientTimestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, lenientTimestampFormatFinder.getGrokPatternName()); + assertEquals(expectedSimplePattern.pattern(), lenientTimestampFormatFinder.getSimplePattern().pattern()); + assertEquals(expectedJavaTimestampFormats, lenientTimestampFormatFinder.getJavaTimestampFormats()); + assertEquals(1, lenientTimestampFormatFinder.getNumMatchedFormats()); + } + + private void validateFindInFullMessage(String message, String expectedPreface, String expectedGrokPatternName, + String expectedSimpleRegex, String expectedJavaTimestampFormat) { + validateFindInFullMessage(message, expectedPreface, expectedGrokPatternName, expectedSimpleRegex, + Collections.singletonList(expectedJavaTimestampFormat)); + } + + private void validateFindInFullMessage(String timestampFormatOverride, String message, String expectedPreface, + String expectedGrokPatternName, String expectedSimpleRegex, + String expectedJavaTimestampFormat) { + validateFindInFullMessage(timestampFormatOverride, message, expectedPreface, expectedGrokPatternName, expectedSimpleRegex, + Collections.singletonList(expectedJavaTimestampFormat)); + } + + private void validateFindInFullMessage(String message, String expectedPreface, String expectedGrokPatternName, + String expectedSimpleRegex, List expectedJavaTimestampFormats) { + validateFindInFullMessage(null, message, expectedPreface, expectedGrokPatternName, expectedSimpleRegex, + expectedJavaTimestampFormats); + } + + private void validateFindInFullMessage(String timestampFormatOverride, String message, String expectedPreface, + String expectedGrokPatternName, String expectedSimpleRegex, + List expectedJavaTimestampFormats) { + + Pattern expectedSimplePattern = Pattern.compile(expectedSimpleRegex); + assertTrue(expectedSimplePattern.matcher(message).find()); + + TimestampFormatFinder timestampFormatFinder = new TimestampFormatFinder(explanation, timestampFormatOverride, false, false, false, + NOOP_TIMEOUT_CHECKER); + timestampFormatFinder.addSample(message); + timestampFormatFinder.selectBestMatch(); + assertEquals(expectedGrokPatternName, timestampFormatFinder.getGrokPatternName()); + 
assertEquals(expectedSimplePattern.pattern(), timestampFormatFinder.getSimplePattern().pattern()); + assertEquals(expectedJavaTimestampFormats, timestampFormatFinder.getJavaTimestampFormats()); + assertEquals(Collections.singletonList(expectedPreface), timestampFormatFinder.getPrefaces()); + assertEquals(1, timestampFormatFinder.getNumMatchedFormats()); } private void validateJavaTimestampFormats(List javaTimestampFormats, String text, long expectedEpochMs) { @@ -325,18 +998,35 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str // All the test times are for Tue May 15 2018 16:14:56 UTC, which is 17:14:56 in London. // This is the timezone that will be used for any text representations that don't include it. ZoneId defaultZone = ZoneId.of("Europe/London"); - TemporalAccessor parsed; + long actualEpochMs; for (int i = 0; i < javaTimestampFormats.size(); ++i) { try { String timestampFormat = javaTimestampFormats.get(i); switch (timestampFormat) { case "ISO8601": - parsed = DateFormatter.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); + actualEpochMs = DateFormatter.forPattern("iso8601").withZone(defaultZone).parseMillis(text); + break; + case "UNIX_MS": + actualEpochMs = Long.parseLong(text); + break; + case "UNIX": + actualEpochMs = (long) (Double.parseDouble(text) * 1000.0); + break; + case "TAI64N": + actualEpochMs = parseMillisFromTai64n(text); break; default: - DateTimeFormatter parser = new DateTimeFormatterBuilder() - .appendPattern(timestampFormat).parseDefaulting(ChronoField.YEAR_OF_ERA, 2018) - .toFormatter(Locale.ROOT); + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().appendPattern(timestampFormat); + if (timestampFormat.indexOf('y') == -1) { + builder.parseDefaulting(ChronoField.YEAR_OF_ERA, 2018); + } + if (timestampFormat.indexOf('m') == -1) { + // All formats tested have either both or neither of hour and minute + builder.parseDefaulting(ChronoField.HOUR_OF_DAY, 0); + builder.parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0); + // Seconds automatically defaults to 0 + } + DateTimeFormatter parser = builder.toFormatter(Locale.ROOT); // This next line parses the textual date without any default timezone, so if // the text doesn't contain the timezone then the resulting temporal accessor // will be incomplete (i.e. impossible to convert to an Instant). You would @@ -346,15 +1036,15 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str // from the text. The solution is to parse twice, once without a default // timezone and then again with a default timezone if the first parse didn't // find one in the text. - parsed = parser.parse(text); + TemporalAccessor parsed = parser.parse(text); if (parsed.query(TemporalQueries.zone()) == null) { // TODO: when Java 8 is no longer supported remove the two // lines and comment above and the closing brace below parsed = parser.withZone(defaultZone).parse(text); } + actualEpochMs = Instant.from(parsed).toEpochMilli(); break; } - long actualEpochMs = Instant.from(parsed).toEpochMilli(); if (expectedEpochMs == actualEpochMs) { break; } @@ -370,4 +1060,17 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str } } } + + /** + * Logic copied from {@code org.elasticsearch.ingest.common.DateFormat.Tai64n.parseMillis}. 
+ */ + private long parseMillisFromTai64n(String tai64nDate) { + if (tai64nDate.startsWith("@")) { + tai64nDate = tai64nDate.substring(1); + } + assertEquals(24, tai64nDate.length()); + long seconds = Long.parseLong(tai64nDate.substring(1, 16), 16); + long nanos = Long.parseLong(tai64nDate.substring(16, 24), 16); + return (seconds * 1000) - 10000 + nanos / 1000000; + } } From 6e307d9fee2cf6472a2877116aa7e0e419aec907 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 23 May 2019 14:05:38 -0700 Subject: [PATCH 237/321] [DOCS] Removes inclusion of java.asciidoc (#42459) --- .../separating-node-client-traffic.asciidoc | 3 +-- .../security/securing-communications/setting-up-ssl.asciidoc | 3 --- docs/reference/setup/setup-xclient.asciidoc | 3 --- x-pack/docs/en/security/ccs-clients-integrations.asciidoc | 4 ---- x-pack/docs/en/watcher/index.asciidoc | 3 --- 5 files changed, 1 insertion(+), 15 deletions(-) diff --git a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc index 2eab8e0ae5adb..61ebd3e682594 100644 --- a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc +++ b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc @@ -65,5 +65,4 @@ transport.profiles.client.xpack.security.ssl.client_authentication: none This setting keeps certificate authentication active for node-to-node traffic, but removes the requirement to distribute a signed certificate to transport -clients. For more information, see -{stack-ov}/java-clients.html#transport-client[Configuring the Transport Client to work with a Secured Cluster]. +clients. \ No newline at end of file diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc index 68eda2cdc3e09..30d206c41b1ba 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc @@ -32,8 +32,5 @@ the {kib} server and to connect to {es} via HTTPS. See . Configure Beats to use encrypted connections. See <>. -. Configure the Java transport client to use encrypted communications. -See <>. - . Configure {es} for Apache Hadoop to use secured transport. See {hadoop-ref}/security.html[{es} for Apache Hadoop Security]. diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 24cef9c736966..a192aeb6ea39a 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -111,6 +111,3 @@ Then in your project's `pom.xml` if using maven, add the following repositories -------------------------------------------------------------- -- - -. If you are using {stack} {security-features}, there are more configuration -steps. See {stack-ov}/java-clients.html[Java Client and Security]. diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index 1a52a9dab7a87..e0de25d44ef1c 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -11,7 +11,6 @@ clusters. 
You will need to update the configuration for several clients to work with a secured cluster: -* <> * <> @@ -35,9 +34,6 @@ be secured as well, or at least communicate with the cluster in a secured way: :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc include::ccs-clients-integrations/cross-cluster.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc -include::ccs-clients-integrations/java.asciidoc[] - :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc include::ccs-clients-integrations/http.asciidoc[] diff --git a/x-pack/docs/en/watcher/index.asciidoc b/x-pack/docs/en/watcher/index.asciidoc index 5f51c948ebf3a..782f0886affc2 100644 --- a/x-pack/docs/en/watcher/index.asciidoc +++ b/x-pack/docs/en/watcher/index.asciidoc @@ -89,9 +89,6 @@ include::actions.asciidoc[] :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform.asciidoc include::transform.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java.asciidoc -include::java.asciidoc[] - :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/managing-watches.asciidoc include::managing-watches.asciidoc[] From c87ea81573557783d25a1023abcb92f7e9ec88aa Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 23 May 2019 18:59:30 -0400 Subject: [PATCH 238/321] Bug fix to allow access to top level params in reduce script (#42096) --- .../ScriptedMetricAggregatorFactory.java | 9 ++-- .../ScriptedMetricAggregatorTests.java | 41 +++++++++++++++++-- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index e08835f0bea14..01084ee0b7f8b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -89,7 +89,7 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu final ScriptedMetricAggContexts.CombineScript combineScript = this.combineScript.newInstance( mergeParams(aggParams, combineScriptParams), aggState); - final Script reduceScript = deepCopyScript(this.reduceScript, context); + final Script reduceScript = deepCopyScript(this.reduceScript, context, aggParams); if (initScript != null) { initScript.execute(); CollectionUtils.ensureNoSelfReferences(aggState, "Scripted metric aggs init script"); @@ -99,12 +99,9 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu pipelineAggregators, metaData); } - private static Script deepCopyScript(Script script, SearchContext context) { + private static Script deepCopyScript(Script script, SearchContext context, Map aggParams) { if (script != null) { - Map params = script.getParams(); - if (params != null) { - params = deepCopyParams(params, context); - } + Map params = mergeParams(aggParams, deepCopyParams(script.getParams(), context)); return new Script(script.getType(), script.getLang(), script.getIdOrCode(), params); } else { return null; diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java index 05115a03e300f..5f74937f6610b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java @@ -71,7 +71,9 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { private static final Script MAP_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScriptParams", Collections.singletonMap("itemValue", 12)); private static final Script COMBINE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptParams", - Collections.singletonMap("divisor", 4)); + Collections.singletonMap("multiplier", 4)); + private static final Script REDUCE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "reduceScriptParams", + Collections.singletonMap("additional", 2)); private static final String CONFLICTING_PARAM_NAME = "initialValue"; private static final Script INIT_SCRIPT_SELF_REF = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptSelfRef", @@ -140,9 +142,14 @@ public static void initMockScripts() { }); SCRIPTS.put("combineScriptParams", params -> { Map state = (Map) params.get("state"); - int divisor = ((Integer) params.get("divisor")); - return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + int multiplier = ((Integer) params.get("multiplier")); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i * multiplier).sum(); }); + SCRIPTS.put("reduceScriptParams", params -> + ((List)params.get("states")).stream().mapToInt(i -> (int)i).sum() + + (int)params.get("aggs_param") + (int)params.get("additional") - + ((List)params.get("states")).size()*24*4 + ); SCRIPTS.put("initScriptSelfRef", params -> { Map state = (Map) params.get("state"); @@ -279,7 +286,33 @@ public void testScriptParamsPassedThrough() throws IOException { ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); // The result value depends on the script params. 
- assertEquals(306, scriptedMetric.aggregation()); + assertEquals(4896, scriptedMetric.aggregation()); + } + } + } + + public void testAggParamsPassedToReduceScript() throws IOException { + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS, Collections.emptyMap()); + Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.params(Collections.singletonMap("aggs_param", 1)) + .initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS) + .combineScript(COMBINE_SCRIPT_PARAMS).reduceScript(REDUCE_SCRIPT_PARAMS); + ScriptedMetric scriptedMetric = searchAndReduce( + newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder, 0, scriptService); + + // The result value depends on the script params. + assertEquals(4803, scriptedMetric.aggregation()); } } } From ca7b80a7fdc76e7d8263057b3f7ac0c545fa39b5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 23 May 2019 16:54:23 -0700 Subject: [PATCH 239/321] Reenable bwc tests (#42478) This commit reenables bwc tests now that the backport of #38373 is complete. --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 037d3242dc4b7..7de02b814da86 100644 --- a/build.gradle +++ b/build.gradle @@ -162,8 +162,8 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ -boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/38373" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = true +final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From 5db76677366de5bb588b7ecc4b1b921ac7663603 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 23 May 2019 20:59:35 -0700 Subject: [PATCH 240/321] Gradle init script for enabling remote build cache (#42484) --- .ci/build-cache.gradle | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .ci/build-cache.gradle diff --git a/.ci/build-cache.gradle b/.ci/build-cache.gradle new file mode 100644 index 0000000000000..b180314b40f4b --- /dev/null +++ b/.ci/build-cache.gradle @@ -0,0 +1,18 @@ +if (System.getenv('GRADLE_BUILD_CACHE_URL')) { + gradle.settingsEvaluated { settings -> + settings.buildCache { + remote(HttpBuildCache) { + url = System.getenv('GRADLE_BUILD_CACHE_URL') + push = Boolean.valueOf(System.getenv('GRADLE_BUILD_CACHE_PUSH') ?: 'false') + if (System.getenv('GRADLE_BUILD_CACHE_USERNAME') && System.getenv('GRADLE_BUILD_CACHE_PASSWORD')) { + credentials { + username = System.getenv('GRADLE_BUILD_CACHE_USERNAME') + password = System.getenv('GRADLE_BUILD_CACHE_PASSWORD') + } + } + } + } + } +} else { + throw new GradleException("You must supply a value for GRADLE_BUILD_CACHE_URL environment variable when applying build-cache.gradle init script") +} \ No newline at end of file From 2d43dd680b21738dbffd51d29f4b462d125706b5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 08:27:06 +0100 Subject: [PATCH 241/321] Add more logging to MockDiskUsagesIT (#42424) This commit adds a log message containing the routing table, emitted on each iteration of the failing assertBusy() in #40174. It also modernizes the code a bit. 
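To make the intent concrete for readers of the series, here is a minimal, self-contained sketch of the diagnostic pattern this commit applies: log the relevant state before every retry of a busy-wait assertion, so that when the last attempt fails the log already shows what the cluster looked like on each iteration. The retryUntil helper and the shardsOnNode1 state below are hypothetical stand-ins invented for this illustration; they are not the test framework's assertBusy or the real routing table.

    import java.util.function.Supplier;

    // Hypothetical stand-ins for illustration; not Elasticsearch test-framework classes.
    public class LogPerIterationSketch {

        /** Retries an assertion until it passes or the deadline expires, logging the
         *  supplied diagnostics before each attempt so a final failure is debuggable. */
        static void retryUntil(Runnable assertion, Supplier<String> diagnostics, long timeoutMillis)
                throws InterruptedException {
            final long deadline = System.currentTimeMillis() + timeoutMillis;
            while (true) {
                System.out.println("--> " + diagnostics.get()); // the extra logging on each iteration
                try {
                    assertion.run();
                    return; // assertion finally passed
                } catch (AssertionError e) {
                    if (System.currentTimeMillis() >= deadline) {
                        throw e; // the log above already shows the state seen on every attempt
                    }
                    Thread.sleep(100);
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            final int[] shardsOnNode1 = {10};
            // Simulate shards slowly relocating off a node that passed the high watermark.
            new Thread(() -> {
                for (int i = 10; i >= 5; i--) {
                    shardsOnNode1[0] = i;
                    try { Thread.sleep(50); } catch (InterruptedException ignored) { return; }
                }
            }).start();

            retryUntil(
                () -> {
                    if (shardsOnNode1[0] != 5) {
                        throw new AssertionError("node1 has " + shardsOnNode1[0] + " shards, expected 5");
                    }
                },
                () -> "routing state: node1 owns " + shardsOnNode1[0] + " shards",
                5_000L);
            System.out.println("assertion eventually passed");
        }
    }

Running main prints the diagnostic line before every attempt, which mirrors the kind of per-iteration routing-table output the added logger.info call is meant to provide for the failure in #40174.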
--- .../allocation/decider/MockDiskUsagesIT.java | 78 ++++++++----------- 1 file changed, 31 insertions(+), 47 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 1ad18c1f69f54..8565beb1b89d7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -19,10 +19,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.routing.RoutingNode; @@ -33,10 +32,9 @@ import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; @@ -50,21 +48,15 @@ public class MockDiskUsagesIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { // Use the mock internal cluster info service, which has fake-able disk usages - return Arrays.asList(MockInternalClusterInfoService.TestPlugin.class); + return Collections.singletonList(MockInternalClusterInfoService.TestPlugin.class); } public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { List nodes = internalCluster().startNodes(3); - // Wait for all 3 nodes to be up - assertBusy(() -> { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); - assertThat(resp.getNodes().size(), equalTo(3)); - }); - // Start with all nodes at 50% usage final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) - internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); + internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); cis.setUpdateFrequency(TimeValue.timeValueMillis(200)); cis.onMaster(); cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); @@ -73,34 +65,32 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "20b" : "80%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") - .put( - DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), - watermarkBytes ? "0b" : "100%") - .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? 
"20b" : "80%") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), + watermarkBytes ? "0b" : "100%") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); // Create an index with 10 shards so we can check allocation for it prepareCreate("test").setSettings(Settings.builder() - .put("number_of_shards", 10) - .put("number_of_replicas", 0) - .put("index.routing.allocation.exclude._name", "")).get(); + .put("number_of_shards", 10) + .put("number_of_replicas", 0)).get(); ensureGreen("test"); // Block until the "fake" cluster info is retrieved at least once assertBusy(() -> { - ClusterInfo info = cis.getClusterInfo(); + final ClusterInfo info = cis.getClusterInfo(); logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); }); final List realNodeNames = new ArrayList<>(); - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - Iterator iter = resp.getState().getRoutingNodes().iterator(); - while (iter.hasNext()) { - RoutingNode node = iter.next(); - realNodeNames.add(node.nodeId()); - logger.info("--> node {} has {} shards", - node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + { + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { + realNodeNames.add(node.nodeId()); + logger.info("--> node {} has {} shards", + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + } } // Update the disk usages so one node has now passed the high watermark @@ -108,17 +98,15 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 0)); // nothing free on node3 - // Retrieve the count of shards on each node - final Map nodesToShardCount = new HashMap<>(); - assertBusy(() -> { - ClusterStateResponse resp12 = client().admin().cluster().prepareState().get(); - Iterator iter12 = resp12.getState().getRoutingNodes().iterator(); - while (iter12.hasNext()) { - RoutingNode node = iter12.next(); + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + logger.info("--> {}", clusterState.routingTable()); + + final Map nodesToShardCount = new HashMap<>(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { logger.info("--> node {} has {} shards", - node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); } assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5)); assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5)); @@ -130,17 +118,13 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { 
cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", "_na_", 100, 50)); cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", "_na_", 100, 50)); // node3 has free space now - // Retrieve the count of shards on each node - nodesToShardCount.clear(); - assertBusy(() -> { - ClusterStateResponse resp1 = client().admin().cluster().prepareState().get(); - Iterator iter1 = resp1.getState().getRoutingNodes().iterator(); - while (iter1.hasNext()) { - RoutingNode node = iter1.next(); + final Map nodesToShardCount = new HashMap<>(); + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + for (final RoutingNode node : clusterState.getRoutingNodes()) { logger.info("--> node {} has {} shards", - node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + nodesToShardCount.put(node.nodeId(), clusterState.getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); } assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3)); assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3)); From 360939f9d8b7ec8aa2ac1a493e0bd992ae03a0ab Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 08:37:22 +0100 Subject: [PATCH 242/321] Add stack traces to RetentionLeasesIT failures (#42425) Today `RetentionLeaseIT` calls `fail(e.toString())` on some exceptions, losing the stack trace that came with the exception. This commit adjusts this to re-throw the exception wrapped in an `AssertionError` so we can see more details about failures such as #41430. --- .../index/seqno/RetentionLeaseIT.java | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index cb40a0726d42f..bbe05accb2813 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -108,7 +108,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); // simulate a peer recovery which locks the soft deletes policy on the primary final Closeable retentionLock = randomBoolean() ? 
primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); @@ -155,7 +155,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); // simulate a peer recovery which locks the soft deletes policy on the primary final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); @@ -166,7 +166,7 @@ public void testRetentionLeaseSyncedOnRemove() throws Exception { for (int i = 0; i < length; i++) { final String id = randomFrom(currentRetentionLeases.keySet()); final CountDownLatch latch = new CountDownLatch(1); - primary.removeRetentionLease(id, ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()))); + primary.removeRetentionLease(id, countDownLatchListener(latch)); // simulate a peer recovery which locks the soft deletes policy on the primary final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.remove(id); @@ -228,7 +228,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); final RetentionLease currentRetentionLease = primary.addRetentionLease(id, retainingSequenceNumber, source, listener); final long now = System.nanoTime(); latch.await(); @@ -390,7 +390,7 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); @@ -479,7 +479,7 @@ public void testCanRenewRetentionLeaseUnderBlock() throws InterruptedException { */ assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); } catch (final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -516,7 +516,7 @@ private void runUnderBlockTest( final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener); latch.await(); @@ -545,7 +545,7 @@ public void onResponse(final ReplicationResponse replicationResponse) { @Override 
public void onFailure(final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -598,7 +598,7 @@ public void testCanRenewRetentionLeaseWithoutWaitingForShards() throws Interrupt */ assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); } catch (final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -637,7 +637,7 @@ private void runWaitForShardsTest( final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); - final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final ActionListener listener = countDownLatchListener(latch); primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener); latch.await(); @@ -665,7 +665,7 @@ public void onResponse(final ReplicationResponse replicationResponse) { @Override public void onFailure(final Exception e) { - fail(e.toString()); + failWithException(e); } }); @@ -674,4 +674,12 @@ public void onFailure(final Exception e) { afterSync.accept(primary); } + private static void failWithException(Exception e) { + throw new AssertionError("unexpected", e); + } + + private static ActionListener countDownLatchListener(CountDownLatch latch) { + return ActionListener.wrap(r -> latch.countDown(), RetentionLeaseIT::failWithException); + } + } From c1de8c29db228bb633f6766684874ed3321d7200 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 08:43:16 +0100 Subject: [PATCH 243/321] Cluster state from API should always have a master (#42454) Today the `TransportClusterStateAction` ignores the state passed by the `TransportMasterNodeAction` and obtains its state from the cluster applier. This might be inconsistent, showing a different node as the master or maybe even having no master. This change adjusts the action to use the passed-in state directly, and adds tests showing that the state returned is consistent with our expectations even if there is a concurrent master failover. 
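As a rough illustration of the wait-for-metadata-version plumbing this change exercises, a usage sketch of the new `ClusterStateRequestBuilder` methods (the target version and timeout values below are made up for illustration, not part of the patch):

    // Ask for a cluster state whose metadata has reached a given version,
    // giving up after a bounded wait instead of blocking indefinitely.
    ClusterStateResponse response = client().admin().cluster().prepareState()
        .clear()
        .setMetaData(true)
        .setWaitForMetaDataVersion(42L)                     // hypothetical target version
        .setWaitForTimeOut(TimeValue.timeValueMillis(100))  // illustrative timeout
        .get();
    if (response.isWaitForTimedOut() == false) {
        // the returned state has caught up to the requested metadata version
        assert response.getState().metaData().version() >= 42L;
    }
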
Fixes #38331 Relates #38432 --- .../state/ClusterStateRequestBuilder.java | 18 ++ .../state/TransportClusterStateAction.java | 78 ++++---- ...ansportClusterStateActionDisruptionIT.java | 182 ++++++++++++++++++ .../test/InternalTestCluster.java | 7 +- 4 files changed, 241 insertions(+), 44 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 35020556b1ed3..da5074b41aa4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { @@ -100,4 +101,21 @@ public ClusterStateRequestBuilder setIndicesOptions(IndicesOptions indicesOption request.indicesOptions(indicesOptions); return this; } + + /** + * Causes the request to wait for the metadata version to advance to at least the given version. + * @param waitForMetaDataVersion The metadata version for which to wait + */ + public ClusterStateRequestBuilder setWaitForMetaDataVersion(long waitForMetaDataVersion) { + request.waitForMetaDataVersion(waitForMetaDataVersion); + return this; + } + + /** + * If {@link ClusterStateRequest#waitForMetaDataVersion()} is set then this determines how long to wait + */ + public ClusterStateRequestBuilder setWaitForTimeOut(TimeValue waitForTimeout) { + request.waitForTimeout(waitForTimeout); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 3248ac167fcbe..cedca2d77e192 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -79,50 +80,50 @@ protected ClusterStateResponse newResponse() { protected void masterOperation(final ClusterStateRequest request, final ClusterState state, final ActionListener listener) throws IOException { - if (request.waitForMetaDataVersion() != null) { - final Predicate metadataVersionPredicate = clusterState -> { - return clusterState.metaData().version() >= request.waitForMetaDataVersion(); - }; - final ClusterStateObserver observer = - new ClusterStateObserver(clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext()); - final ClusterState clusterState = observer.setAndGetObservedState(); 
- if (metadataVersionPredicate.test(clusterState)) { - buildResponse(request, clusterState, listener); - } else { - observer.waitForNextChange(new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - try { - buildResponse(request, state, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } + final Predicate acceptableClusterStatePredicate + = request.waitForMetaDataVersion() == null ? clusterState -> true + : clusterState -> clusterState.metaData().version() >= request.waitForMetaDataVersion(); + + final Predicate acceptableClusterStateOrNotMasterPredicate = request.local() + ? acceptableClusterStatePredicate + : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedMaster() == false); - @Override - public void onClusterServiceClose() { - listener.onFailure(new NodeClosedException(clusterService.localNode())); + if (acceptableClusterStatePredicate.test(state)) { + ActionListener.completeWith(listener, () -> buildResponse(request, state)); + } else { + assert acceptableClusterStateOrNotMasterPredicate.test(state) == false; + new ClusterStateObserver(state, clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext()) + .waitForNextChange(new ClusterStateObserver.Listener() { + + @Override + public void onNewClusterState(ClusterState newState) { + if (acceptableClusterStatePredicate.test(newState)) { + ActionListener.completeWith(listener, () -> buildResponse(request, newState)); + } else { + listener.onFailure(new NotMasterException( + "master stepped down waiting for metadata version " + request.waitForMetaDataVersion())); } + } - @Override - public void onTimeout(TimeValue timeout) { - try { - listener.onResponse(new ClusterStateResponse(clusterState.getClusterName(), null, true)); - } catch (Exception e) { - listener.onFailure(e); - } + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + try { + listener.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); + } catch (Exception e) { + listener.onFailure(e); } - }, metadataVersionPredicate); - } - } else { - ClusterState currentState = clusterService.state(); - buildResponse(request, currentState, listener); + } + }, acceptableClusterStateOrNotMasterPredicate); } } - private void buildResponse(final ClusterStateRequest request, - final ClusterState currentState, - final ActionListener listener) throws IOException { + private ClusterStateResponse buildResponse(final ClusterStateRequest request, + final ClusterState currentState) { logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); @@ -184,8 +185,7 @@ private void buildResponse(final ClusterStateRequest request, } } - listener.onResponse(new ClusterStateResponse(currentState.getClusterName(), builder.build(), false)); + return new ClusterStateResponse(currentState.getClusterName(), builder.build(), false); } - } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java new file mode 100644 index 0000000000000..0d51f647ee28c --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.state; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; + +@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST, transportClientRatio = 0) +public class TransportClusterStateActionDisruptionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(MockTransportService.TestPlugin.class); + } + + public void testNonLocalRequestAlwaysFindsMaster() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final ClusterStateRequestBuilder clusterStateRequestBuilder = client().admin().cluster().prepareState() + .clear().setNodes(true).setMasterNodeTimeout("100ms"); + final ClusterStateResponse clusterStateResponse; + try { + clusterStateResponse = clusterStateRequestBuilder.get(); + } catch (MasterNotDiscoveredException e) { + return; // ok, we hit the disconnected node + } + assertNotNull("should always contain a master node", clusterStateResponse.getState().nodes().getMasterNodeId()); + }); + } + + public void testLocalRequestAlwaysSucceeds() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final String node = randomFrom(internalCluster().getNodeNames()); + final DiscoveryNodes discoveryNodes = client(node).admin().cluster().prepareState() + 
.clear().setLocal(true).setNodes(true).setMasterNodeTimeout("100ms").get().getState().nodes(); + for (DiscoveryNode discoveryNode : discoveryNodes) { + if (discoveryNode.getName().equals(node)) { + return; + } + } + fail("nodes did not contain [" + node + "]: " + discoveryNodes); + }); + } + + public void testNonLocalRequestAlwaysFindsMasterAndWaitsForMetadata() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final String node = randomFrom(internalCluster().getNodeNames()); + final long metadataVersion + = internalCluster().getInstance(ClusterService.class, node).getClusterApplierService().state().metaData().version(); + final long waitForMetaDataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); + final ClusterStateRequestBuilder clusterStateRequestBuilder = client(node).admin().cluster().prepareState() + .clear().setNodes(true).setMetaData(true) + .setMasterNodeTimeout(TimeValue.timeValueMillis(100)).setWaitForTimeOut(TimeValue.timeValueMillis(100)) + .setWaitForMetaDataVersion(waitForMetaDataVersion); + final ClusterStateResponse clusterStateResponse; + try { + clusterStateResponse = clusterStateRequestBuilder.get(); + } catch (MasterNotDiscoveredException e) { + return; // ok, we hit the disconnected node + } + if (clusterStateResponse.isWaitForTimedOut() == false) { + final ClusterState state = clusterStateResponse.getState(); + assertNotNull("should always contain a master node", state.nodes().getMasterNodeId()); + assertThat("waited for metadata version", state.metaData().version(), greaterThanOrEqualTo(waitForMetaDataVersion)); + } + }); + } + + public void testLocalRequestWaitsForMetadata() throws Exception { + runRepeatedlyWhileChangingMaster(() -> { + final String node = randomFrom(internalCluster().getNodeNames()); + final long metadataVersion + = internalCluster().getInstance(ClusterService.class, node).getClusterApplierService().state().metaData().version(); + final long waitForMetaDataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); + final ClusterStateResponse clusterStateResponse = client(node).admin().cluster() + .prepareState().clear().setLocal(true).setMetaData(true).setWaitForMetaDataVersion(waitForMetaDataVersion) + .setMasterNodeTimeout(TimeValue.timeValueMillis(100)).setWaitForTimeOut(TimeValue.timeValueMillis(100)) + .get(); + if (clusterStateResponse.isWaitForTimedOut() == false) { + final MetaData metaData = clusterStateResponse.getState().metaData(); + assertThat("waited for metadata version " + waitForMetaDataVersion + " with node " + node, + metaData.version(), greaterThanOrEqualTo(waitForMetaDataVersion)); + } + }); + } + + public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception { + internalCluster().startNodes(3); + + assertBusy(() -> assertThat(client().admin().cluster().prepareState().clear().setMetaData(true) + .get().getState().getLastCommittedConfiguration().getNodeIds().stream() + .filter(n -> ClusterBootstrapService.isBootstrapPlaceholder(n) == false).collect(Collectors.toSet()), hasSize(3))); + + final String masterName = internalCluster().getMasterName(); + + final AtomicBoolean shutdown = new AtomicBoolean(); + final Thread assertingThread = new Thread(() -> { + while (shutdown.get() == false) { + runnable.run(); + } + }, "asserting thread"); + + final Thread updatingThread = new Thread(() -> { + String value = "none"; + while (shutdown.get() == false) { + value = "none".equals(value) ? 
"all" : "none"; + final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); + assertAcked(client(nonMasterNode).admin().cluster().prepareUpdateSettings().setPersistentSettings( + Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value))); + } + }, "updating thread"); + + final List mockTransportServices + = StreamSupport.stream(internalCluster().getInstances(TransportService.class).spliterator(), false) + .map(ts -> (MockTransportService) ts).collect(Collectors.toList()); + + assertingThread.start(); + updatingThread.start(); + + final MockTransportService masterTransportService + = (MockTransportService) internalCluster().getInstance(TransportService.class, masterName); + + for (MockTransportService mockTransportService : mockTransportServices) { + if (masterTransportService != mockTransportService) { + masterTransportService.addFailToSendNoConnectRule(mockTransportService); + mockTransportService.addFailToSendNoConnectRule(masterTransportService); + } + } + + assertBusy(() -> { + final String nonMasterNode = randomValueOtherThan(masterName, () -> randomFrom(internalCluster().getNodeNames())); + final String claimedMasterName = internalCluster().getMasterName(nonMasterNode); + assertThat(claimedMasterName, not(equalTo(masterName))); + }); + + shutdown.set(true); + assertingThread.join(); + updatingThread.join(); + internalCluster().close(); + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index cc071df9769ca..3b4f8c8f55d4c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -149,8 +149,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; -import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.awaitBusy; import static org.elasticsearch.test.ESTestCase.getTestTransportType; @@ -161,7 +161,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -1884,9 +1883,7 @@ public String getMasterName() { public String getMasterName(@Nullable String viaNode) { try { Client client = viaNode != null ? 
client(viaNode) : client(); - final DiscoveryNode masterNode = client.admin().cluster().prepareState().get().getState().nodes().getMasterNode(); - assertNotNull(masterNode); - return masterNode.getName(); + return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(); } catch (Exception e) { logger.warn("Can't fetch cluster state", e); throw new RuntimeException("Can't get master node " + e.getMessage(), e); From 4b21100178cdc584193df32dc6b8e2e6fde902d2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 May 2019 10:44:59 +0200 Subject: [PATCH 244/321] Remove IndexStore and DirectoryService (#42446) Both of these classes are basically a bloated wrapper around a simple construct that can simply be a DirectoryFactory interface. This change removes both classes and replaces them with a simple stateless interface that creates a new `Directory` per shard. The concept of `index.store` is preserved since it makes sense from a configuration perspective. --- ...ce.java => SmbMmapFsDirectoryFactory.java} | 14 +--- .../store/smbmmapfs/SmbMmapFsIndexStore.java | 37 ---------- ....java => SmbSimpleFsDirectoryFactory.java} | 12 +-- .../smbsimplefs/SmbSimpleFsIndexStore.java | 38 ---------- .../plugin/store/smb/SMBStorePlugin.java | 13 ++-- .../common/settings/IndexScopedSettings.java | 4 +- .../elasticsearch/env/NodeEnvironment.java | 4 +- .../org/elasticsearch/index/IndexModule.java | 35 +++++---- .../org/elasticsearch/index/IndexService.java | 19 +++-- ...ryService.java => FsDirectoryFactory.java} | 18 ++--- .../elasticsearch/index/store/IndexStore.java | 39 ---------- .../elasticsearch/indices/IndicesService.java | 14 ++-- .../java/org/elasticsearch/node/Node.java | 5 +- .../plugins/IndexStorePlugin.java | 30 ++++++-- .../elasticsearch/index/IndexModuleTests.java | 18 +++-- .../index/shard/IndexShardTests.java | 4 +- ...ests.java => FsDirectoryFactoryTests.java} | 56 +++++++++++--- .../index/store/FsDirectoryServiceTests.java | 73 ------------------- .../plugins/IndexStorePluginTests.java | 22 +++--- .../basic/SearchWithRandomIOExceptionsIT.java | 14 ++-- ...rvice.java => MockFSDirectoryFactory.java} | 63 +++++----------- .../test/store/MockFSIndexStore.java | 29 ++------ 22 files changed, 182 insertions(+), 379 deletions(-) rename plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/{SmbMmapFsDirectoryService.java => SmbMmapFsDirectoryFactory.java} (74%) delete mode 100644 plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java rename plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/{SmbSimpleFsDirectoryService.java => SmbSimpleFsDirectoryFactory.java} (76%) delete mode 100644 plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java rename server/src/main/java/org/elasticsearch/index/store/{FsDirectoryService.java => FsDirectoryFactory.java} (94%) delete mode 100644 server/src/main/java/org/elasticsearch/index/store/IndexStore.java rename server/src/test/java/org/elasticsearch/index/store/{IndexStoreTests.java => FsDirectoryFactoryTests.java} (59%) delete mode 100644 server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java rename test/framework/src/main/java/org/elasticsearch/test/store/{MockFSDirectoryService.java => MockFSDirectoryFactory.java} (78%) diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java 
b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java similarity index 74% rename from plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java rename to plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java index 1264464cf0071..13b6f9401abc5 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryFactory.java @@ -23,22 +23,16 @@ import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MMapDirectory; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.SmbDirectoryWrapper; import java.io.IOException; import java.nio.file.Path; -public class SmbMmapFsDirectoryService extends FsDirectoryService { - - public SmbMmapFsDirectoryService(IndexSettings indexSettings, ShardPath path) { - super(indexSettings, path); - } +public final class SmbMmapFsDirectoryFactory extends FsDirectoryFactory { @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - logger.debug("wrapping MMapDirectory for SMB"); - return new SmbDirectoryWrapper(new MMapDirectory(location, indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING))); + protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { + return new SmbDirectoryWrapper(new MMapDirectory(location, lockFactory)); } } diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java deleted file mode 100644 index 0399348966361..0000000000000 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.store.smbmmapfs; - -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; - -public class SmbMmapFsIndexStore extends IndexStore { - - public SmbMmapFsIndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - @Override - public DirectoryService newDirectoryService(ShardPath path) { - return new SmbMmapFsDirectoryService(indexSettings, path); - } -} diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java similarity index 76% rename from plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java rename to plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java index 87e45a02cf6cb..e5e9025f82d85 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryFactory.java @@ -23,22 +23,16 @@ import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.SmbDirectoryWrapper; import java.io.IOException; import java.nio.file.Path; -public class SmbSimpleFsDirectoryService extends FsDirectoryService { - - public SmbSimpleFsDirectoryService(IndexSettings indexSettings, ShardPath path) { - super(indexSettings, path); - } +public final class SmbSimpleFsDirectoryFactory extends FsDirectoryFactory { @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - logger.debug("wrapping SimpleFSDirectory for SMB"); + protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory)); } } diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java deleted file mode 100644 index 3b6b3c3c8990f..0000000000000 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.smbsimplefs; - -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; - -public class SmbSimpleFsIndexStore extends IndexStore { - - public SmbSimpleFsIndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - @Override - public DirectoryService newDirectoryService(ShardPath path) { - return new SmbSimpleFsDirectoryService(indexSettings, path); - } -} - diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java b/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java index 111100a2f1580..bb818e9b53d38 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/plugin/store/smb/SMBStorePlugin.java @@ -19,23 +19,20 @@ package org.elasticsearch.plugin.store.smb; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.smbmmapfs.SmbMmapFsIndexStore; -import org.elasticsearch.index.store.smbsimplefs.SmbSimpleFsIndexStore; +import org.elasticsearch.index.store.smbmmapfs.SmbMmapFsDirectoryFactory; +import org.elasticsearch.index.store.smbsimplefs.SmbSimpleFsDirectoryFactory; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.Plugin; import java.util.Map; -import java.util.function.Function; public class SMBStorePlugin extends Plugin implements IndexStorePlugin { @Override - public Map> getIndexStoreFactories() { + public Map getDirectoryFactories() { return Map.of( - "smb_mmap_fs", SmbMmapFsIndexStore::new, - "smb_simple_fs", SmbSimpleFsIndexStore::new); + "smb_mmap_fs", new SmbMmapFsDirectoryFactory(), + "smb_simple_fs", new SmbSimpleFsDirectoryFactory()); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 2acbbec3f8171..907277b53dde9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesRequestCache; @@ -157,7 +157,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, - FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, + FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING, Store.FORCE_RAM_TERM_DICT, EngineConfig.INDEX_CODEC_SETTING, IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS, diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 497c6a9e06459..4d19dd66732fc 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ 
b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -52,7 +52,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -430,7 +430,7 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... sh // resolve the directory the shard actually lives in Path p = shardPaths[i].resolve("index"); // open a directory (will be immediately closed) on the shard's location - dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING)); + dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING)); // create a lock for the "write.lock" file try { locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME); diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index acec458b8b0cd..ca0f34803cc0c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -45,7 +45,8 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -75,7 +76,7 @@ * {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the * {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the * {@link BM25Similarity}, the configuration {@code "index.similarity.my_similarity.type : "BM25"} can be used. - *
<li>{@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link IndexStorePlugin}</li>
+ * <li>{@link DirectoryService} - Custom {@link DirectoryService} instances can be registered via {@link IndexStorePlugin}</li>
 * <li>{@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via
 * {@link #addIndexEventListener(IndexEventListener)}</li>
 *
  • Settings update listener - Custom settings update listener can be registered via @@ -86,6 +87,8 @@ public final class IndexModule { public static final Setting NODE_STORE_ALLOW_MMAP = Setting.boolSetting("node.store.allow_mmap", true, Property.NodeScope); + private static final FsDirectoryFactory DEFAULT_DIRECTORY_FACTORY = new FsDirectoryFactory(); + public static final Setting INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); @@ -112,7 +115,7 @@ public final class IndexModule { private SetOnce indexSearcherWrapper = new SetOnce<>(); private final Set indexEventListeners = new HashSet<>(); private final Map> similarities = new HashMap<>(); - private final Map> indexStoreFactories; + private final Map directoryFactories; private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); private final List searchOperationListeners = new ArrayList<>(); private final List indexOperationListeners = new ArrayList<>(); @@ -125,19 +128,19 @@ public final class IndexModule { * @param indexSettings the index settings * @param analysisRegistry the analysis registry * @param engineFactory the engine factory - * @param indexStoreFactories the available store types + * @param directoryFactories the available store types */ public IndexModule( final IndexSettings indexSettings, final AnalysisRegistry analysisRegistry, final EngineFactory engineFactory, - final Map> indexStoreFactories) { + final Map directoryFactories) { this.indexSettings = indexSettings; this.analysisRegistry = analysisRegistry; this.engineFactory = Objects.requireNonNull(engineFactory); this.searchOperationListeners.add(new SearchSlowLog(indexSettings)); this.indexOperationListeners.add(new IndexingSlowLog(indexSettings)); - this.indexStoreFactories = Collections.unmodifiableMap(indexStoreFactories); + this.directoryFactories = Collections.unmodifiableMap(directoryFactories); } /** @@ -384,7 +387,7 @@ public IndexService newIndexService( IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? 
(shard) -> null : indexSearcherWrapper.get(); eventListener.beforeIndexCreated(indexSettings.getIndex(), indexSettings.getSettings()); - final IndexStore store = getIndexStore(indexSettings, indexStoreFactories); + final IndexStorePlugin.DirectoryFactory directoryFactory = getDirectoryFactory(indexSettings, directoryFactories); final QueryCache queryCache; if (indexSettings.getValue(INDEX_QUERY_CACHE_ENABLED_SETTING)) { BiFunction queryCacheProvider = forceQueryCacheProvider.get(); @@ -399,12 +402,12 @@ public IndexService newIndexService( return new IndexService(indexSettings, indexCreationContext, environment, xContentRegistry, new SimilarityService(indexSettings, scriptService, similarities), shardStoreDeleter, analysisRegistry, engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService, - client, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, + client, queryCache, directoryFactory, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, searchOperationListeners, indexOperationListeners, namedWriteableRegistry); } - private static IndexStore getIndexStore( - final IndexSettings indexSettings, final Map> indexStoreFactories) { + private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( + final IndexSettings indexSettings, final Map indexStoreFactories) { final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING); final Type type; final Boolean allowMmap = NODE_STORE_ALLOW_MMAP.get(indexSettings.getNodeSettings()); @@ -420,20 +423,16 @@ private static IndexStore getIndexStore( if (allowMmap == false && (type == Type.MMAPFS || type == Type.HYBRIDFS)) { throw new IllegalArgumentException("store type [" + storeType + "] is not allowed because mmap is disabled"); } - final IndexStore store; + final IndexStorePlugin.DirectoryFactory factory; if (storeType.isEmpty() || isBuiltinType(storeType)) { - store = new IndexStore(indexSettings); + factory = DEFAULT_DIRECTORY_FACTORY; } else { - Function factory = indexStoreFactories.get(storeType); + factory = indexStoreFactories.get(storeType); if (factory == null) { throw new IllegalArgumentException("Unknown store type [" + storeType + "]"); } - store = factory.apply(indexSettings); - if (store == null) { - throw new IllegalStateException("store must not be null"); - } } - return store; + return factory; } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index f5deb99c80d80..2d86a2b436d9d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; import org.elasticsearch.Assertions; import org.elasticsearch.Version; @@ -66,14 +67,13 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import 
org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; @@ -103,7 +103,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final BitsetFilterCache bitsetFilterCache; private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; - private final IndexStore indexStore; + private final IndexStorePlugin.DirectoryFactory directoryFactory; private final IndexSearcherWrapper searcherWrapper; private final IndexCache indexCache; private final MapperService mapperService; @@ -149,7 +149,7 @@ public IndexService( ScriptService scriptService, Client client, QueryCache queryCache, - IndexStore indexStore, + IndexStorePlugin.DirectoryFactory directoryFactory, IndexEventListener eventListener, IndexModule.IndexSearcherWrapperFactory wrapperFactory, MapperRegistry mapperRegistry, @@ -200,7 +200,7 @@ public IndexService( this.client = client; this.eventListener = eventListener; this.nodeEnv = nodeEnv; - this.indexStore = indexStore; + this.directoryFactory = directoryFactory; this.engineFactory = Objects.requireNonNull(engineFactory); // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); @@ -401,9 +401,8 @@ public synchronized IndexShard createShard( warmer.warm(searcher, shard, IndexService.this.indexSettings); } }; - // TODO we can remove either IndexStore or DirectoryService. All we need is a simple Supplier - DirectoryService directoryService = indexStore.newDirectoryService(path); - store = new Store(shardId, this.indexSettings, directoryService.newDirectory(), lock, + Directory directory = directoryFactory.newDirectory(this.indexSettings, path); + store = new Store(shardId, this.indexSettings, directory, lock, new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); eventListener.onStoreCreated(shardId); indexShard = new IndexShard( @@ -753,8 +752,8 @@ final IndexSearcherWrapper getSearcherWrapper() { return searcherWrapper; } // pkg private for testing - final IndexStore getIndexStore() { - return indexStore; + final IndexStorePlugin.DirectoryFactory getDirectoryFactory() { + return directoryFactory; } // pkg private for testing private void maybeFSyncTranslogs() { diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java similarity index 94% rename from server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java rename to server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java index a8b50fcc53895..84bb4c49b27d4 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryFactory.java @@ -30,13 +30,13 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.store.SimpleFSLockFactory; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.plugins.IndexStorePlugin; import java.io.IOException; import java.nio.file.Files; @@ -44,7 +44,8 @@ import java.util.HashSet; import java.util.Set; -public class FsDirectoryService extends DirectoryService { +public class FsDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + public static final Setting INDEX_LOCK_FACTOR_SETTING = new Setting<>("index.store.fs.fs_lock", "native", (s) -> { switch (s) { case "native": @@ -56,27 +57,20 @@ public class FsDirectoryService extends DirectoryService { } // can we set on both - node and index level, some nodes might be running on NFS so they might need simple rather than native }, Property.IndexScope, Property.NodeScope); - private final ShardPath path; - - @Inject - public FsDirectoryService(IndexSettings indexSettings, ShardPath path) { - super(path.getShardId(), indexSettings); - this.path = path; - } @Override - public Directory newDirectory() throws IOException { + public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { final Path location = path.resolveIndex(); final LockFactory lockFactory = indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING); Files.createDirectories(location); - Directory wrapped = newFSDirectory(location, lockFactory); + Directory wrapped = newFSDirectory(location, lockFactory, indexSettings); Set preLoadExtensions = new HashSet<>( indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING)); wrapped = setPreload(wrapped, location, lockFactory, preLoadExtensions); return wrapped; } - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + protected Directory newFSDirectory(Path location, LockFactory lockFactory, IndexSettings indexSettings) throws IOException { final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()); IndexModule.Type type; diff --git a/server/src/main/java/org/elasticsearch/index/store/IndexStore.java b/server/src/main/java/org/elasticsearch/index/store/IndexStore.java deleted file mode 100644 index 0d41b1ac95d18..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store; - -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardPath; - -public class IndexStore extends AbstractIndexComponent { - - public IndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - /** - * The shard store class that should be used for each shard. 
- */ - public DirectoryService newDirectoryService(ShardPath path) { - return new FsDirectoryService(indexSettings, path); - } - -} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index be5e1cae4fa8e..16382d15cd325 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -110,13 +110,13 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; @@ -200,7 +200,7 @@ public class IndicesService extends AbstractLifecycleComponent private final IndicesQueryCache indicesQueryCache; private final MetaStateService metaStateService; private final Collection>> engineFactoryProviders; - private final Map> indexStoreFactories; + private final Map directoryFactories; final AbstractRefCounted indicesRefCount; // pkg-private for testing private final CountDownLatch closeLatch = new CountDownLatch(1); @@ -216,7 +216,7 @@ public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvi IndexScopedSettings indexScopedSettings, CircuitBreakerService circuitBreakerService, BigArrays bigArrays, ScriptService scriptService, Client client, MetaStateService metaStateService, Collection>> engineFactoryProviders, - Map> indexStoreFactories) { + Map directoryFactories) { this.settings = settings; this.threadPool = threadPool; this.pluginsService = pluginsService; @@ -251,13 +251,13 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.engineFactoryProviders = engineFactoryProviders; // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : indexStoreFactories.keySet()) { + for (final String indexStoreType : directoryFactories.keySet()) { if (IndexModule.isBuiltinType(indexStoreType)) { throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); } } - this.indexStoreFactories = indexStoreFactories; + this.directoryFactories = directoryFactories; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. 
In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -547,7 +547,7 @@ private synchronized IndexService createIndexService(IndexService.IndexCreationC idxSettings.getNumberOfReplicas(), indexCreationContext); - final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), indexStoreFactories); + final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), directoryFactories); for (IndexingOperationListener operationListener : indexingOperationListeners) { indexModule.addIndexOperationListener(operationListener); } @@ -614,7 +614,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { */ public synchronized MapperService createIndexMapperService(IndexMetaData indexMetaData) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopedSettings); - final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), indexStoreFactories); + final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), directoryFactories); pluginsService.onIndexModule(indexModule); return indexModule.newIndexMapperService(xContentRegistry, mapperRegistry, scriptService); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index fab08ab1c03f7..6e592ba324fb4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -99,7 +99,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -413,10 +412,10 @@ protected Node( .collect(Collectors.toList()); - final Map> indexStoreFactories = + final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) .stream() - .map(IndexStorePlugin::getIndexStoreFactories) + .map(IndexStorePlugin::getDirectoryFactories) .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); diff --git a/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java index 16eec535e4b4a..2beaf1935e409 100644 --- a/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/IndexStorePlugin.java @@ -19,24 +19,40 @@ package org.elasticsearch.plugins; +import org.apache.lucene.store.Directory; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.shard.ShardPath; +import java.io.IOException; import java.util.Map; -import java.util.function.Function; /** - * A plugin that provides alternative index store implementations. + * A plugin that provides alternative directory implementations. */ public interface IndexStorePlugin { /** - * The index store factories for this plugin. When an index is created the store type setting + * An interface that describes how to create a new directory instance per shard. 
+ */ + @FunctionalInterface + interface DirectoryFactory { + /** + * Creates a new directory per shard. This method is called once per shard on shard creation. + * @param indexSettings the shard's index settings + * @param shardPath the path the shard is using + * @return a new Lucene directory instance + * @throws IOException if an IOException occurs while opening the directory + */ + Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException; + } + + /** + * The {@link DirectoryFactory} mappings for this plugin. When an index is created the store type setting * {@link org.elasticsearch.index.IndexModule#INDEX_STORE_TYPE_SETTING} on the index will be examined and either use the default or a - * built-in type, or looked up among all the index store factories from {@link IndexStore} plugins. + * built-in type, or looked up among all the directory factories from {@link IndexStorePlugin} plugins. * - * @return a map from store type to an index store factory + * @return a map from store type to a directory factory */ - Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories(); + Map<String, DirectoryFactory> getDirectoryFactories(); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 351cccdff4aa0..d0f811007a6fa 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.store.Directory; import org.apache.lucene.util.SetOnce.AlreadySetException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -60,9 +61,10 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.NonNegativeScoresSimilarity; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -70,6 +72,7 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ClusterServiceUtils; @@ -86,7 +89,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; @@ -174,11 +176,12 @@ public void testRegisterIndexStore() throws IOException { .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "foo_store") .build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings); - final Map<String, Function<IndexSettings, IndexStore>> indexStoreFactories = Collections.singletonMap("foo_store",
FooStore::new); + final Map indexStoreFactories = Collections.singletonMap( + "foo_store", new FooFunction()); final IndexModule module = new IndexModule(indexSettings, emptyAnalysisRegistry, new InternalEngineFactory(), indexStoreFactories); final IndexService indexService = newIndexService(module); - assertThat(indexService.getIndexStore(), instanceOf(FooStore.class)); + assertThat(indexService.getDirectoryFactory(), instanceOf(FooFunction.class)); indexService.close("simon says", false); } @@ -444,10 +447,11 @@ public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermS } } - public static final class FooStore extends IndexStore { + public static final class FooFunction implements IndexStorePlugin.DirectoryFactory { - public FooStore(IndexSettings indexSettings) { - super(indexSettings); + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + return new FsDirectoryFactory().newDirectory(indexSettings, shardPath); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 1710154f72f94..5187ef37fcdf8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -125,7 +125,7 @@ import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.store.MockFSDirectoryService; +import org.elasticsearch.test.store.MockFSDirectoryFactory; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Assert; @@ -3819,7 +3819,7 @@ public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecover readyToCloseLatch.await(); shard.close("testing", false); // in integration tests, this is done as a listener on IndexService. 
- MockFSDirectoryService.checkIndex(logger, shard.store(), shard.shardId); + MockFSDirectoryFactory.checkIndex(logger, shard.store(), shard.shardId); } catch (InterruptedException | IOException e) { throw new AssertionError(e); } finally { diff --git a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java similarity index 59% rename from server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java rename to server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java index 21c8d1c1d78a4..0f24f8f3a5a4f 100644 --- a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java @@ -19,10 +19,12 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FileSwitchDirectory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.NoLockFactory; import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.SleepingLockWrapper; import org.apache.lucene.util.Constants; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -36,32 +38,68 @@ import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.util.Arrays; import java.util.Locale; -public class IndexStoreTests extends ESTestCase { +public class FsDirectoryFactoryTests extends ESTestCase { + + public void testPreload() throws IOException { + doTestPreload(); + doTestPreload("nvd", "dvd", "tim"); + doTestPreload("*"); + } + + private void doTestPreload(String...preload) throws IOException { + Settings build = Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "mmapfs") + .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), preload) + .build(); + IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); + Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); + Files.createDirectories(tempDir); + ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); + FsDirectoryFactory fsDirectoryFactory = new FsDirectoryFactory(); + Directory directory = fsDirectoryFactory.newDirectory(settings, path); + assertFalse(directory instanceof SleepingLockWrapper); + if (preload.length == 0) { + assertTrue(directory.toString(), directory instanceof MMapDirectory); + assertFalse(((MMapDirectory) directory).getPreload()); + } else if (Arrays.asList(preload).contains("*")) { + assertTrue(directory.toString(), directory instanceof MMapDirectory); + assertTrue(((MMapDirectory) directory).getPreload()); + } else { + assertTrue(directory.toString(), directory instanceof FileSwitchDirectory); + FileSwitchDirectory fsd = (FileSwitchDirectory) directory; + assertTrue(fsd.getPrimaryDir() instanceof MMapDirectory); + assertTrue(((MMapDirectory) fsd.getPrimaryDir()).getPreload()); + assertTrue(fsd.getSecondaryDir() instanceof MMapDirectory); + assertFalse(((MMapDirectory) fsd.getSecondaryDir()).getPreload()); + } + } public void testStoreDirectory() throws IOException { Index index = new Index("foo", "fooUUID"); final Path tempDir = createTempDir().resolve(index.getUUID()).resolve("0"); // default - doTestStoreDirectory(index, tempDir, null, IndexModule.Type.FS); + 
doTestStoreDirectory(tempDir, null, IndexModule.Type.FS); // explicit directory impls for (IndexModule.Type type : IndexModule.Type.values()) { - doTestStoreDirectory(index, tempDir, type.name().toLowerCase(Locale.ROOT), type); + doTestStoreDirectory(tempDir, type.name().toLowerCase(Locale.ROOT), type); } } - private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingValue, IndexModule.Type type) throws IOException { + private void doTestStoreDirectory(Path tempDir, String typeSettingValue, IndexModule.Type type) throws IOException { Settings.Builder settingsBuilder = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); if (typeSettingValue != null) { settingsBuilder.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), typeSettingValue); } Settings settings = settingsBuilder.build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); - FsDirectoryService service = new FsDirectoryService(indexSettings, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0))); - try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { + FsDirectoryFactory service = new FsDirectoryFactory(); + try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE, indexSettings)) { switch (type) { case HYBRIDFS: assertHybridDirectory(directory); @@ -91,8 +129,8 @@ private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingV } private void assertHybridDirectory(Directory directory) { - assertTrue(directory.toString(), directory instanceof FsDirectoryService.HybridDirectory); - Directory randomAccessDirectory = ((FsDirectoryService.HybridDirectory) directory).getRandomAccessDirectory(); + assertTrue(directory.toString(), directory instanceof FsDirectoryFactory.HybridDirectory); + Directory randomAccessDirectory = ((FsDirectoryFactory.HybridDirectory) directory).getRandomAccessDirectory(); assertTrue("randomAccessDirectory: " + randomAccessDirectory.toString(), randomAccessDirectory instanceof MMapDirectory); } } diff --git a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java deleted file mode 100644 index e84ff3f32841b..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.store; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FileSwitchDirectory; -import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.store.SleepingLockWrapper; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; - -public class FsDirectoryServiceTests extends ESTestCase { - - public void testPreload() throws IOException { - doTestPreload(); - doTestPreload("nvd", "dvd", "tim"); - doTestPreload("*"); - } - - private void doTestPreload(String...preload) throws IOException { - Settings build = Settings.builder() - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "mmapfs") - .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), preload) - .build(); - IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); - Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); - Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); - FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, path); - Directory directory = fsDirectoryService.newDirectory(); - assertFalse(directory instanceof SleepingLockWrapper); - if (preload.length == 0) { - assertTrue(directory.toString(), directory instanceof MMapDirectory); - assertFalse(((MMapDirectory) directory).getPreload()); - } else if (Arrays.asList(preload).contains("*")) { - assertTrue(directory.toString(), directory instanceof MMapDirectory); - assertTrue(((MMapDirectory) directory).getPreload()); - } else { - assertTrue(directory.toString(), directory instanceof FileSwitchDirectory); - FileSwitchDirectory fsd = (FileSwitchDirectory) directory; - assertTrue(fsd.getPrimaryDir() instanceof MMapDirectory); - assertTrue(((MMapDirectory) fsd.getPrimaryDir()).getPreload()); - assertTrue(fsd.getSecondaryDir() instanceof MMapDirectory); - assertFalse(((MMapDirectory) fsd.getSecondaryDir()).getPreload()); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java index d413c0f0be229..fac270172b079 100644 --- a/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/IndexStorePluginTests.java @@ -22,15 +22,13 @@ import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.node.MockNode; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.Collections; import java.util.Map; -import java.util.function.Function; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsString; @@ -41,8 +39,8 @@ public class IndexStorePluginTests extends ESTestCase { public static class BarStorePlugin extends Plugin implements IndexStorePlugin { @Override - public 
Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories() { - return Collections.singletonMap("store", IndexStore::new); + public Map<String, DirectoryFactory> getDirectoryFactories() { + return Collections.singletonMap("store", new FsDirectoryFactory()); } } @@ -50,8 +48,8 @@ public Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories() public static class FooStorePlugin extends Plugin implements IndexStorePlugin { @Override - public Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories() { - return Collections.singletonMap("store", IndexStore::new); + public Map<String, DirectoryFactory> getDirectoryFactories() { + return Collections.singletonMap("store", new FsDirectoryFactory()); } } @@ -65,8 +63,8 @@ public static class ConflictingStorePlugin extends Plugin implements IndexStoreP } @Override - public Map<String, Function<IndexSettings, IndexStore>> getIndexStoreFactories() { - return Collections.singletonMap(TYPE, IndexStore::new); + public Map<String, DirectoryFactory> getDirectoryFactories() { + return Collections.singletonMap(TYPE, new FsDirectoryFactory()); } } @@ -86,11 +84,11 @@ public void testDuplicateIndexStoreFactories() { if (JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0) { assertThat(e, hasToString(matches( "java.lang.IllegalStateException: Duplicate key store \\(attempted merging values " + - "org.elasticsearch.plugins.IndexStorePluginTests\\$BarStorePlugin.* " + - "and org.elasticsearch.plugins.IndexStorePluginTests\\$FooStorePlugin.*\\)"))); + "org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+ " + + "and org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+\\)"))); } else { assertThat(e, hasToString(matches( - "java.lang.IllegalStateException: Duplicate key org.elasticsearch.plugins.IndexStorePluginTests\\$BarStorePlugin.*"))); + "java.lang.IllegalStateException: Duplicate key org.elasticsearch.index.store.FsDirectoryFactory@[\\w\\d]+"))); } } diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index b90d84e61f183..0a4d3201f5cc3 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -37,7 +37,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.store.MockFSDirectoryService; +import org.elasticsearch.test.store.MockFSDirectoryFactory; import org.elasticsearch.test.store.MockFSIndexStore; import java.io.IOException; import java.util.Arrays; @@ -107,16 +107,16 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc client().admin().indices().prepareFlush("test").execute().get(); client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate)); + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate)); client().admin().indices().prepareOpen("test").execute().get(); } else { Settings.Builder settings = Settings.builder() .put("index.number_of_replicas", randomIntBetween(0, 1)) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) -
.put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate) // we cannot expect that the index will be valid - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate); + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate); logger.info("creating index: [test] using settings: [{}]", settings.build()); client().admin().indices().prepareCreate("test") .setSettings(settings) @@ -198,8 +198,8 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc // check the index still contains the records that we indexed without errors client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0) - .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0)); + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0) + .put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0)); client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); SearchResponse searchResponse = client().prepareSearch().setTypes("type") diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java similarity index 78% rename from test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java rename to test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java index 65a66989cdd97..58e881b296a7d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java @@ -19,19 +19,16 @@ package org.elasticsearch.test.store; -import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestRuleMarkFailure; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; @@ -41,8 +38,9 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.index.store.Store; +import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.junit.Assert; @@ -51,11 +49,10 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.charset.StandardCharsets; -import java.nio.file.Path; import java.util.Arrays; import 
java.util.Random; -public class MockFSDirectoryService extends FsDirectoryService { +public class MockFSDirectoryFactory implements IndexStorePlugin.DirectoryFactory { public static final Setting RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting("index.store.mock.random.io_exception_rate_on_open", 0.0d, 0.0d, Property.IndexScope, Property.NodeScope); @@ -64,42 +61,12 @@ public class MockFSDirectoryService extends FsDirectoryService { public static final Setting CRASH_INDEX_SETTING = Setting.boolSetting("index.store.mock.random.crash_index", true, Property.IndexScope, Property.NodeScope); - private final FsDirectoryService delegateService; - private final Random random; - private final double randomIOExceptionRate; - private final double randomIOExceptionRateOnOpen; - private final MockDirectoryWrapper.Throttling throttle; - private final boolean crashIndex; - - @Inject - public MockFSDirectoryService(IndexSettings idxSettings, final ShardPath path) { - super(idxSettings, path); - Settings indexSettings = idxSettings.getSettings(); - final long seed = idxSettings.getValue(ESIntegTestCase.INDEX_TEST_SEED_SETTING); - this.random = new Random(seed); - - randomIOExceptionRate = RANDOM_IO_EXCEPTION_RATE_SETTING.get(indexSettings); - randomIOExceptionRateOnOpen = RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.get(indexSettings); - random.nextInt(shardId.getId() + 1); // some randomness per shard - throttle = MockDirectoryWrapper.Throttling.NEVER; - crashIndex = CRASH_INDEX_SETTING.get(indexSettings); - - if (logger.isDebugEnabled()) { - logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), - throttle, crashIndex); - } - delegateService = randomDirectoryService(idxSettings, path); - } - - - @Override - public Directory newDirectory() throws IOException { - return wrap(delegateService.newDirectory()); - } - @Override - protected synchronized Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - throw new UnsupportedOperationException(); + public Directory newDirectory(IndexSettings idxSettings, ShardPath path) throws IOException { + Settings indexSettings = idxSettings.getSettings(); + Random random = new Random(idxSettings.getValue(ESIntegTestCase.INDEX_TEST_SEED_SETTING)); + return wrap(randomDirectoryService(random, idxSettings, path), random, indexSettings, + path.getShardId()); } public static void checkIndex(Logger logger, Store store, ShardId shardId) { @@ -137,8 +104,14 @@ public static void checkIndex(Logger logger, Store store, ShardId shardId) { } } - private Directory wrap(Directory dir) { - final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex); + private Directory wrap(Directory dir, Random random, Settings indexSettings, ShardId shardId) { + + double randomIOExceptionRate = RANDOM_IO_EXCEPTION_RATE_SETTING.get(indexSettings); + double randomIOExceptionRateOnOpen = RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.get(indexSettings); + random.nextInt(shardId.getId() + 1); // some randomness per shard + MockDirectoryWrapper.Throttling throttle = MockDirectoryWrapper.Throttling.NEVER; + boolean crashIndex = CRASH_INDEX_SETTING.get(indexSettings); + final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, crashIndex); w.setRandomIOExceptionRate(randomIOExceptionRate); w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen); w.setThrottling(throttle); @@ -150,7 +123,7 @@ private Directory 
wrap(Directory dir) { return w; } - private FsDirectoryService randomDirectoryService(IndexSettings indexSettings, ShardPath path) { + private Directory randomDirectoryService(Random random, IndexSettings indexSettings, ShardPath path) throws IOException { final IndexMetaData build = IndexMetaData.builder(indexSettings.getIndexMetaData()) .settings(Settings.builder() // don't use the settings from indexSettings#getSettings() they are merged with node settings and might contain @@ -160,7 +133,7 @@ private FsDirectoryService randomDirectoryService(IndexSettings indexSettings, S RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey())) .build(); final IndexSettings newIndexSettings = new IndexSettings(build, indexSettings.getNodeSettings()); - return new FsDirectoryService(newIndexSettings, path); + return new FsDirectoryFactory().newDirectory(newIndexSettings, path); } public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper { diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1ec5087605539..47a20803f7ac7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -26,14 +26,10 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.Plugin; @@ -43,9 +39,8 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; -public class MockFSIndexStore extends IndexStore { +public final class MockFSIndexStore { public static final Setting INDEX_CHECK_INDEX_ON_CLOSE_SETTING = Setting.boolSetting("index.store.mock.check_index_on_close", true, Property.IndexScope, Property.NodeScope); @@ -59,14 +54,14 @@ public Settings additionalSettings() { @Override public List> getSettings() { return Arrays.asList(INDEX_CHECK_INDEX_ON_CLOSE_SETTING, - MockFSDirectoryService.CRASH_INDEX_SETTING, - MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING, - MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING); + MockFSDirectoryFactory.CRASH_INDEX_SETTING, + MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING, + MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING); } @Override - public Map> getIndexStoreFactories() { - return Collections.singletonMap("mock", MockFSIndexStore::new); + public Map getDirectoryFactories() { + return Collections.singletonMap("mock", new MockFSDirectoryFactory()); } @Override @@ -80,15 +75,6 @@ public void onIndexModule(IndexModule indexModule) { } } - MockFSIndexStore(IndexSettings indexSettings) { - super(indexSettings); - } - - @Override - public DirectoryService newDirectoryService(ShardPath path) { - return new MockFSDirectoryService(indexSettings, path); - } - private static final EnumSet validCheckIndexStates = 
EnumSet.of( IndexShardState.STARTED, IndexShardState.POST_RECOVERY ); @@ -101,7 +87,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { Logger logger = Loggers.getLogger(getClass(), indexShard.shardId()); - MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); + MockFSDirectoryFactory.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } } @@ -115,5 +101,4 @@ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardSt } } - } From 93f3d12c759f63c12f6e773cfc514345627a4c16 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 24 May 2019 10:27:21 +0100 Subject: [PATCH 245/321] [ML] Reenable ml distributed failure test after issue resolution (#42431) Relates to issue #37117 --- .../xpack/ml/integration/MlDistributedFailureIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 480f85798800b..40249c0bc771e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -62,7 +62,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFailOver() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); @@ -108,7 +107,6 @@ public void testLoseDedicatedMasterNode() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFullClusterRestart() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); From 306faa41e5337089d6e26d2d4be08d2a55257744 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 24 May 2019 12:41:49 +0100 Subject: [PATCH 246/321] [ML Data Frame] Reenable muted integration tests (#42373) Reverts muting of tests and simplifies the test teardown so that all data frames will be stopped This reverts commit 4a9438762a562d20e938d2ea82538805f33e85b1 --- .../xpack/dataframe/integration/DataFrameTransformIT.java | 1 - .../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 -- .../integration/DataFrameConfigurationIndexIT.java | 2 -- .../dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 -- .../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 -- .../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 -- .../dataframe/integration/DataFrameRestTestCase.java | 8 +++++--- .../dataframe/integration/DataFrameTaskFailedStateIT.java | 2 -- .../integration/DataFrameTransformProgressIT.java | 1 - .../xpack/dataframe/integration/DataFrameUsageIT.java | 2 -- 10 files changed, 5 insertions(+), 19 deletions(-) diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index 363218d1b0f14..bce4a4a3b503b 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ 
b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -30,7 +30,6 @@ public void cleanTransforms() throws IOException { cleanUp(); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 7dc79c1ae8fbe..9884c9bb6793b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -23,7 +22,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index d7e12cf2bee4d..681599331c8af 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,7 +8,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -23,7 +22,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index 9bac6ca0b4049..d9927cd09ed8f 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import 
org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 5b95d1daead53..26a957ea055c2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -16,7 +15,6 @@ import java.io.IOException; import java.util.Map; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index dab7e819881d2..770eaec7bd141 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 7ffa5391b7a4a..23bff163031ce 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -301,10 +301,12 @@ public void wipeDataFrameTransforms() throws IOException { request.addParameter("timeout", "10s"); request.addParameter("ignore", "404"); adminClient().performRequest(request); + } + + for 
(Map transformConfig : transformConfigs) { + String transformId = (String) transformConfig.get("id"); String state = getDataFrameIndexerState(transformId); - if (state != null) { - assertEquals("stopped", getDataFrameIndexerState(transformId)); - } + assertEquals("Transform [" + transformId + "] indexer is not in the stopped state", "stopped", state); } for (Map transformConfig : transformConfigs) { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 7b63644dd34ad..96aeeda8755f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -20,7 +19,6 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java index d6ef3cc641be2..fea225ced3bd9 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java @@ -46,7 +46,6 @@ @LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTransformProgressIT extends ESRestTestCase { - protected void createReviewsIndex() throws Exception { final int numDocs = 1000; final RestHighLevelClient restClient = new TestRestHighLevelClient(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index f98fa6a271365..4f209c5a9f3f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -23,7 +22,6 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static 
org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; From 3907a6d1ea5d519fbfcbfde87ba58534b5cae82b Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 24 May 2019 14:17:21 +0100 Subject: [PATCH 247/321] Drain master task queue when stabilising (#42504) Today the default stabilisation time is calculated on the assumption that the elected master has no pending tasks to process when it is elected, but this is not a safe assumption to make. This can result in a cluster reaching the end of its stabilisation time without having stabilised. Furthermore in #36943 we increased the probability that each step in `runRandomly()` enqueues another task, vastly increasing the chance that we hit such a situation. This change extends the stabilisation process to allow time for all pending tasks, plus a task that might currently be in flight. Fixes #41967, in which the master entered the stabilisation phase with over 800 tasks to process. --- .../org/elasticsearch/cluster/coordination/Coordinator.java | 2 +- .../cluster/coordination/CoordinatorTests.java | 6 ++++++ .../indices/cluster/FakeThreadPoolMasterService.java | 4 ++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 6304588e3121a..1e7b38e50d1e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1231,7 +1231,7 @@ public void run() { @Override public String toString() { - return "scheduled timeout for " + this; + return "scheduled timeout for " + CoordinatorPublication.this; } }, publishTimeout, Names.GENERIC); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index b4d337a1bf57e..5daa863402b2a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1515,6 +1515,10 @@ void stabilise(long stabilisationDurationMillis) { final ClusterNode leader = getAnyLeader(); final long leaderTerm = leader.coordinator.getCurrentTerm(); + + final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount(); + runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); + final Matcher isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); final String leaderId = leader.getId(); @@ -1527,6 +1531,8 @@ void stabilise(long stabilisationDurationMillis) { assertFalse(nodeId + " should not have an active publication", clusterNode.coordinator.publicationInProgress()); if (clusterNode == leader) { + assertThat(nodeId + " is still the leader", clusterNode.coordinator.getMode(), is(LEADER)); + assertThat(nodeId + " did not change term", clusterNode.coordinator.getCurrentTerm(), is(leaderTerm)); continue; } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java b/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java index d535e9e00ee53..e1c7c3fafd274 
100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java @@ -84,6 +84,10 @@ public void execute(Runnable command) { }; } + public int getFakeMasterServicePendingTaskCount() { + return pendingTasks.size(); + } + private void scheduleNextTaskIfNecessary() { if (taskInProgress == false && pendingTasks.isEmpty() == false && scheduledNextTask == false) { scheduledNextTask = true; From 631142d5dd088a10de8dcd939b50a14301173283 Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Fri, 24 May 2019 15:20:14 +0200 Subject: [PATCH 248/321] Fix sorting on nested field with unmapped (#42451) Previously sorting on a missing nested field would fail with an Exception: `[nested_field] failed to find nested object under path [nested_path]` despite `unmapped_type` being set on the query. Fixes: #33644 --- .../search/sort/FieldSortBuilder.java | 30 +++++++++++-------- .../search/sort/FieldSortIT.java | 16 ++++++++++ 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 2be73a0da9cb6..8abd4b9f40d5c 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -373,8 +373,10 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { return SORT_DOC; } } else { + boolean isUnmapped = false; MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { + isUnmapped = true; if (unmappedType != null) { fieldType = context.getMapperService().unmappedFieldType(unmappedType); } else { @@ -392,20 +394,22 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { localSortMode = reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; } - final Nested nested; - if (nestedSort != null) { - if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on v6.5.0 or higher"); - } - if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { - throw new QueryShardException(context, - "max_children is only supported on last level of nested sort"); + Nested nested = null; + if (isUnmapped == false) { + if (nestedSort != null) { + if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { + throw new QueryShardException(context, + "max_children is only supported on v6.5.0 or higher"); + } + if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) { + throw new QueryShardException(context, + "max_children is only supported on last level of nested sort"); + } + // new nested sorts takes priority + nested = resolveNested(context, nestedSort); + } else { + nested = resolveNested(context, nestedPath, nestedFilter); } - // new nested sorts takes priority - nested = resolveNested(context, nestedSort); - } else { - nested = resolveNested(context, nestedPath, nestedFilter); } IndexFieldData fieldData = context.getForField(fieldType); diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java index 526fe0a48b575..d3f21867ab1d1 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -903,6 +903,22 @@ public void testIgnoreUnmapped() throws Exception { .addSort(SortBuilders.fieldSort("kkk").unmappedType("keyword")) .get(); assertNoFailures(searchResponse); + + // nested field + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").unmappedType("keyword") + .setNestedSort(new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.foo")))) + .get(); + assertNoFailures(searchResponse); + + // nestedQuery + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").unmappedType("keyword") + .setNestedSort(new NestedSortBuilder("nested").setFilter(QueryBuilders.termQuery("nested.foo", "abc")))) + .get(); + assertNoFailures(searchResponse); } public void testSortMVField() throws Exception { From cbf1150d845faafe32e005b0a8749a97803b1b34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Fri, 24 May 2019 15:27:10 +0200 Subject: [PATCH 249/321] Small internal AnalysisRegistry changes (#42500) Some internal refactorings to the AnalysisRegistry, spin-off from #40782. 
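The heart of this refactoring is folding three near-identical lookup methods (tokenizer, token filter, char filter) into one generic getProvider helper that consults the index-scoped settings group first and otherwise falls back to a per-component lookup function. A minimal, self-contained sketch of that pattern, using only the JDK; Registry and its member names are illustrative stand-ins, not the actual Elasticsearch types:

import java.util.Map;
import java.util.function.Function;

final class Registry<P> {
    // Per-index configuration, keyed by component name (stand-in for the
    // index.analysis.* settings groups).
    private final Map<String, P> indexScoped;

    Registry(Map<String, P> indexScoped) {
        this.indexScoped = indexScoped;
    }

    // One generic lookup replaces three copy-pasted variants: prefer the
    // index-scoped entry, otherwise delegate to the per-component global lookup.
    P getProvider(String name, Function<String, P> globalLookup) {
        P provider = indexScoped.get(name);
        return provider != null ? provider : globalLookup.apply(name);
    }
}

The real helper in the diff below additionally resolves the provider through the component's "type" setting, but the control flow is the same.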
--- .../index/analysis/AnalysisRegistry.java | 79 ++++++++----------- 1 file changed, 33 insertions(+), 46 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index d9c4b2c510bc9..684d36c311f8b 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -39,6 +38,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -156,19 +156,18 @@ public void close() throws IOException { * Creates an index-level {@link IndexAnalyzers} from this registry using the given index settings */ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException { - final Map charFilterFactories = buildCharFilterFactories(indexSettings); final Map tokenizerFactories = buildTokenizerFactories(indexSettings); final Map tokenFilterFactories = buildTokenFilterFactories(indexSettings); - final Map> analyzierFactories = buildAnalyzerFactories(indexSettings); + final Map> analyzerFactories = buildAnalyzerFactories(indexSettings); final Map> normalizerFactories = buildNormalizerFactories(indexSettings); - return build(indexSettings, analyzierFactories, normalizerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories); + return build(indexSettings, analyzerFactories, normalizerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories); } public Map buildTokenFilterFactories(IndexSettings indexSettings) throws IOException { final Map tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER); - return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, - Collections.unmodifiableMap(this.tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters); + return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, this.tokenFilters, + prebuiltAnalysis.preConfiguredTokenFilters); } public Map buildTokenizerFactories(IndexSettings indexSettings) throws IOException { @@ -202,13 +201,8 @@ public Map> buildNormalizerFactories(IndexSettings i * @return {@link TokenizerFactory} provider or null */ public AnalysisProvider getTokenizerProvider(String tokenizer, IndexSettings indexSettings) { - final Map tokenizerSettings = indexSettings.getSettings().getGroups("index.analysis.tokenizer"); - if (tokenizerSettings.containsKey(tokenizer)) { - Settings currentSettings = tokenizerSettings.get(tokenizer); - return getAnalysisProvider(Component.TOKENIZER, tokenizers, tokenizer, currentSettings.get("type")); - } else { - return getTokenizerProvider(tokenizer); - } + return getProvider(Component.TOKENIZER, tokenizer, indexSettings, "index.analysis.tokenizer", tokenizers, + this::getTokenizerProvider); } /** @@ -220,14 +214,8 @@ public AnalysisProvider getTokenizerProvider(String tokenizer, * @return {@link TokenFilterFactory} provider or null */ public AnalysisProvider 
getTokenFilterProvider(String tokenFilter, IndexSettings indexSettings) { - final Map tokenFilterSettings = indexSettings.getSettings().getGroups("index.analysis.filter"); - if (tokenFilterSettings.containsKey(tokenFilter)) { - Settings currentSettings = tokenFilterSettings.get(tokenFilter); - String typeName = currentSettings.get("type"); - return getAnalysisProvider(Component.FILTER, tokenFilters, tokenFilter, typeName); - } else { - return getTokenFilterProvider(tokenFilter); - } + return getProvider(Component.FILTER, tokenFilter, indexSettings, "index.analysis.filter", tokenFilters, + this::getTokenFilterProvider); } /** @@ -239,12 +227,18 @@ public AnalysisProvider getTokenFilterProvider(String tokenF * @return {@link CharFilterFactory} provider or null */ public AnalysisProvider getCharFilterProvider(String charFilter, IndexSettings indexSettings) { - final Map tokenFilterSettings = indexSettings.getSettings().getGroups("index.analysis.char_filter"); - if (tokenFilterSettings.containsKey(charFilter)) { - Settings currentSettings = tokenFilterSettings.get(charFilter); - return getAnalysisProvider(Component.CHAR_FILTER, charFilters, charFilter, currentSettings.get("type")); + return getProvider(Component.CHAR_FILTER, charFilter, indexSettings, "index.analysis.char_filter", charFilters, + this::getCharFilterProvider); + } + + private AnalysisProvider getProvider(Component componentType, String componentName, IndexSettings indexSettings, + String componentSettings, Map> providers, Function> providerFunction) { + final Map subSettings = indexSettings.getSettings().getGroups(componentSettings); + if (subSettings.containsKey(componentName)) { + Settings currentSettings = subSettings.get(componentName); + return getAnalysisProvider(componentType, providers, componentName, currentSettings.get("type")); } else { - return getCharFilterProvider(charFilter); + return providerFunction.apply(componentName); } } @@ -323,9 +317,9 @@ private Map buildMapping(Component component, IndexSettings setti } // go over the char filters in the bindings and register the ones that are not configured - for (Map.Entry> entry : providerMap.entrySet()) { + for (Map.Entry> entry : providerMap.entrySet()) { String name = entry.getKey(); - AnalysisModule.AnalysisProvider provider = entry.getValue(); + AnalysisProvider provider = entry.getValue(); // we don't want to re-register one that already exists if (settingsMap.containsKey(name)) { continue; @@ -334,7 +328,7 @@ private Map buildMapping(Component component, IndexSettings setti if (provider.requiresAnalysisSettings()) { continue; } - AnalysisModule.AnalysisProvider defaultProvider = defaultInstance.get(name); + AnalysisProvider defaultProvider = defaultInstance.get(name); final T instance; if (defaultProvider == null) { instance = provider.get(settings, environment, name, defaultSettings); @@ -344,20 +338,15 @@ private Map buildMapping(Component component, IndexSettings setti factories.put(name, instance); } - for (Map.Entry> entry : defaultInstance.entrySet()) { + for (Map.Entry> entry : defaultInstance.entrySet()) { final String name = entry.getKey(); - final AnalysisModule.AnalysisProvider provider = entry.getValue(); - if (factories.containsKey(name) == false) { - final T instance = provider.get(settings, environment, name, defaultSettings); - if (factories.containsKey(name) == false) { - factories.put(name, instance); - } - } + final AnalysisProvider provider = entry.getValue(); + factories.putIfAbsent(name, provider.get(settings, environment, name, 
defaultSettings)); } return factories; } - private AnalysisProvider getAnalysisProvider(Component component, Map> providerMap, + private static AnalysisProvider getAnalysisProvider(Component component, Map> providerMap, String name, String typeName) { if (typeName == null) { throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer"); @@ -371,7 +360,7 @@ private AnalysisProvider getAnalysisProvider(Component component, Map>> analyzerProviderFactories; + final Map>> analyzerProviderFactories; final Map> preConfiguredTokenFilters; final Map> preConfiguredTokenizers; final Map> preConfiguredCharFilterFactories; @@ -396,19 +385,19 @@ private PrebuiltAnalysis( this.preConfiguredTokenizers = preConfiguredTokenizers; } - public AnalysisModule.AnalysisProvider getCharFilterFactory(String name) { + public AnalysisProvider getCharFilterFactory(String name) { return preConfiguredCharFilterFactories.get(name); } - public AnalysisModule.AnalysisProvider getTokenFilterFactory(String name) { + public AnalysisProvider getTokenFilterFactory(String name) { return preConfiguredTokenFilters.get(name); } - public AnalysisModule.AnalysisProvider getTokenizerFactory(String name) { + public AnalysisProvider getTokenizerFactory(String name) { return preConfiguredTokenizers.get(name); } - public AnalysisModule.AnalysisProvider> getAnalyzerProvider(String name) { + public AnalysisProvider> getAnalyzerProvider(String name) { return analyzerProviderFactories.get(name); } @@ -426,8 +415,6 @@ public IndexAnalyzers build(IndexSettings indexSettings, Map charFilterFactoryFactories, Map tokenFilterFactoryFactories) { - Index index = indexSettings.getIndex(); - analyzerProviders = new HashMap<>(analyzerProviders); Map analyzers = new HashMap<>(); Map normalizers = new HashMap<>(); Map whitespaceNormalizers = new HashMap<>(); @@ -458,7 +445,7 @@ public IndexAnalyzers build(IndexSettings indexSettings, if (analyzers.containsKey("default_index")) { throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use " + - "[index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); + "[index.analysis.analyzer.default] instead for index [" + indexSettings.getIndex().getName() + "]"); } for (Map.Entry analyzer : analyzers.entrySet()) { From da1ba685b16018053b28847cf4618d0ddf9c40fb Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Fri, 24 May 2019 07:44:56 -0700 Subject: [PATCH 250/321] remove 6.3.x constants (#42087) relates to refactoring effort #41164. 
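For readers following the refactor: every serialization change below collapses the same wire-compatibility gate. A minimal sketch of the before/after shape, modeled on the PostStartTrialRequest hunk in this patch (the class is illustrative and trimmed, not a verbatim copy of the source):

    import java.io.IOException;
    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    class VersionGatedRequest {
        private String type = "trial";
        private boolean acknowledge = true;

        // Before: each field is gated on the peer's wire version, because a 6.2.x
        // peer neither sends nor expects it, and readers substitute defaults.
        void readFromPre63(StreamInput in) throws IOException {
            if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
                type = in.readString();
                acknowledge = in.readBoolean();
            } else {
                type = "trial";
                acknowledge = true;
            }
        }

        // After: once the oldest wire-compatible peer is 6.3.0 or newer, the guard
        // is always true, so the branch (and the V_6_3_x constants behind it) is
        // dead code and both methods collapse to the unconditional form.
        void readFrom(StreamInput in) throws IOException {
            type = in.readString();
            acknowledge = in.readBoolean();
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeString(type);
            out.writeBoolean(acknowledge);
        }
    }

The write side follows symmetrically: out.getVersion() can no longer be before 6.3.0, so the else branches that refused or padded the payload for older peers are unreachable.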
--- .../client/ml/job/process/ModelSnapshot.java | 2 +- .../index/rankeval/RankEvalRequest.java | 28 +-- .../main/java/org/elasticsearch/Build.java | 35 +-- .../main/java/org/elasticsearch/Version.java | 14 -- .../index/mapper/DynamicTemplate.java | 26 +-- .../index/mapper/RootObjectMapper.java | 2 +- .../indices/flush/SyncedFlushService.java | 13 +- .../search/slice/SliceBuilder.java | 14 +- .../java/org/elasticsearch/BuildTests.java | 2 +- .../java/org/elasticsearch/VersionTests.java | 36 +-- .../health/ClusterHealthRequestTests.java | 5 +- .../common/geo/GeoWKTShapeParserTests.java | 6 +- .../index/mapper/DynamicTemplateTests.java | 19 +- .../mapper/LegacyDynamicMappingTests.java | 67 ------ .../mapper/LegacyMapperServiceTests.java | 92 -------- .../similarity/LegacySimilarityTests.java | 93 -------- .../LegacyUpdateMappingIntegrationIT.java | 212 ------------------ .../search/slice/SliceBuilderTests.java | 15 -- .../rest/yaml/section/SetupSectionTests.java | 3 +- .../license/PostStartTrialRequest.java | 31 +-- .../license/PostStartTrialResponse.java | 60 ++--- .../elasticsearch/xpack/core/XPackPlugin.java | 7 +- .../ml/action/PostCalendarEventsAction.java | 9 - .../core/ml/action/PutCalendarAction.java | 9 - .../core/ml/action/PutDatafeedAction.java | 9 - .../xpack/core/ml/action/PutJobAction.java | 9 - .../ml/action/RevertModelSnapshotAction.java | 9 - .../xpack/core/ml/action/UpdateJobAction.java | 7 - .../xpack/core/ml/job/config/JobUpdate.java | 14 +- .../autodetect/state/ModelSnapshot.java | 2 +- .../monitoring/MonitoringFeatureSetUsage.java | 9 +- .../action/MonitoringBulkResponse.java | 11 +- .../xpack/core/rollup/job/RollupJob.java | 2 +- .../core/security/user/BeatsSystemUser.java | 2 - .../license/XPackLicenseStateTests.java | 2 +- .../xpack/core/XPackPluginTests.java | 4 +- .../xpack/ml/MlConfigMigratorTests.java | 3 +- .../action/TransportOpenJobActionTests.java | 3 +- .../monitoring/MonitoringFeatureSetTests.java | 7 +- .../action/MonitoringBulkResponseTests.java | 7 +- .../authc/esnative/ReservedRealm.java | 24 +- .../support/SecurityIndexManager.java | 6 - .../user/TransportGetUsersActionTests.java | 1 - .../authc/esnative/ReservedRealmTests.java | 11 - .../support/SecurityIndexManagerTests.java | 8 - .../xpack/restart/FullClusterRestartIT.java | 1 - 46 files changed, 110 insertions(+), 841 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java delete mode 100644 server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java delete mode 100644 server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java delete mode 100644 server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java index 5d95e091d40b1..6a92eaf019021 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java @@ -226,7 +226,7 @@ public static class Builder { private String jobId; // Stored snapshot documents created prior to 6.3.0 will have no value for min_version. 
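    // Where a removed constant was used as a plain value rather than as a wire
    // gate (as in the min_version default just below), the patch substitutes
    // Version.fromString("6.3.0"): parsing the dotted string yields the same
    // Version that the V_6_3_0 constant named, so the stored default is
    // unchanged and only the named constant disappears from the Version class.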
- private Version minVersion = Version.V_6_3_0; + private Version minVersion = Version.fromString("6.3.0"); private Date timestamp; private String description; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index 0dbbb9f90f1fa..f02ce8fe23496 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -51,18 +50,8 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { RankEvalRequest(StreamInput in) throws IOException { super.readFrom(in); rankingEvaluationSpec = new RankEvalSpec(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - // readStringArray uses readVInt for size, we used readInt in 6.2 - int indicesSize = in.readInt(); - String[] indices = new String[indicesSize]; - for (int i = 0; i < indicesSize; i++) { - indices[i] = in.readString(); - } - // no indices options yet - } + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); } RankEvalRequest() { @@ -131,17 +120,8 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); rankingEvaluationSpec.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - } else { - // writeStringArray uses writeVInt for size, we used writeInt in 6.2 - out.writeInt(indices.length); - for (String index : indices) { - out.writeString(index); - } - // no indices options yet - } + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 1a1ee2744f77a..bc62c3a3ddd27 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -224,45 +224,26 @@ public String date() { public static Build readBuild(StreamInput in) throws IOException { final Flavor flavor; final Type type; - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - flavor = Flavor.fromDisplayName(in.readString(), false); - } else { - flavor = Flavor.OSS; - } - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); - } else { - type = Type.UNKNOWN; - } + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + flavor = Flavor.fromDisplayName(in.readString(), false); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = 
in.readString(); boolean snapshot = in.readBoolean(); final String version; - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + version = in.readString(); return new Build(flavor, type, hash, date, snapshot, version); } public static void writeBuild(Build build, StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeString(build.flavor().displayName()); - } - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - final Type buildType = build.type(); - out.writeString(buildType.displayName()); - } + out.writeString(build.flavor().displayName()); + out.writeString(build.type().displayName()); out.writeString(build.shortHash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 7f939ca627a95..c685d39c7562f 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,14 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - // The below version is missing from the 7.3 JAR - private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); - public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_1_ID = 6030199; - public static final Version V_6_3_1 = new Version(V_6_3_1_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); - public static final int V_6_3_2_ID = 6030299; - public static final Version V_6_3_2 = new Version(V_6_3_2_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_1_ID = 6040199; @@ -152,12 +144,6 @@ public static Version fromId(int id) { return V_6_4_1; case V_6_4_0_ID: return V_6_4_0; - case V_6_3_2_ID: - return V_6_3_2; - case V_6_3_1_ID: - return V_6_3_1; - case V_6_3_0_ID: - return V_6_3_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index 30c9606acd928..b271084a0d293 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -160,8 +159,7 @@ public static XContentFieldType fromString(String value) { public abstract String defaultMappingType(); } - public static DynamicTemplate parse(String name, Map conf, - Version indexVersionCreated) throws MapperParsingException { + public static DynamicTemplate 
parse(String name, Map conf) throws MapperParsingException { String match = null; String pathMatch = null; String unmatch = null; @@ -207,18 +205,16 @@ public static DynamicTemplate parse(String name, Map conf, final MatchType matchType = MatchType.fromString(matchPattern); - if (indexVersionCreated.onOrAfter(Version.V_6_3_0)) { - // Validate that the pattern - for (String regex : new String[] { pathMatch, match, pathUnmatch, unmatch }) { - if (regex == null) { - continue; - } - try { - matchType.matches(regex, ""); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType - + "] is invalid. Cannot create dynamic template [" + name + "].", e); - } + // Validate that the pattern + for (String regex : new String[] { pathMatch, match, pathUnmatch, unmatch }) { + if (regex == null) { + continue; + } + try { + matchType.matches(regex, ""); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType + + "] is invalid. Cannot create dynamic template [" + name + "].", e); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 6d2f0fddd86c2..89b1810bf393c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -173,7 +173,7 @@ protected boolean processField(RootObjectMapper.Builder builder, String fieldNam Map.Entry entry = tmpl.entrySet().iterator().next(); String templateName = entry.getKey(); Map templateParams = (Map) entry.getValue(); - DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams, indexVersionCreated); + DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams); if (template != null) { templates.add(template); } diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 6291531b7f907..fc8c6fcef98c6 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -594,18 +593,12 @@ static final class PreSyncedFlushResponse extends TransportResponse { this.existingSyncId = existingSyncId; } - boolean includeExistingSyncId(Version version) { - return version.onOrAfter(Version.V_6_3_0); - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); commitId = new Engine.CommitId(in); numDocs = in.readInt(); - if (includeExistingSyncId(in.getVersion())) { - existingSyncId = in.readOptionalString(); - } + existingSyncId = in.readOptionalString(); } @Override @@ -613,9 +606,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); commitId.writeTo(out); out.writeInt(numDocs); - if (includeExistingSyncId(out.getVersion())) { - out.writeOptionalString(existingSyncId); - } + out.writeOptionalString(existingSyncId); } } diff --git 
a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 08f042aa69650..3c86b21a0873d 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -54,7 +54,7 @@ /** * A slice builder allowing to split a scroll in multiple partitions. - * If the provided field is the "_uid" it uses a {@link org.elasticsearch.search.slice.TermsSliceQuery} + * If the provided field is the "_id" it uses a {@link org.elasticsearch.search.slice.TermsSliceQuery} * to do the slicing. The slicing is done at the shard level first and then each shard is split into multiple slices. * For instance if the number of shards is equal to 2 and the user requested 4 slices * then the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. @@ -79,7 +79,7 @@ public class SliceBuilder implements Writeable, ToXContentObject { PARSER.declareInt(SliceBuilder::setMax, MAX_FIELD); } - /** Name of field to slice against (_uid by default) */ + /** Name of field to slice against (_id by default) */ private String field = IdFieldMapper.NAME; /** The id of the slice */ private int id = -1; @@ -249,15 +249,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, String field = this.field; boolean useTermQuery = false; - if ("_uid".equals(field)) { - // on new indices, the _id acts as a _uid - field = IdFieldMapper.NAME; - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { - throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); - } - DEPRECATION_LOG.deprecated("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); - useTermQuery = true; - } else if (IdFieldMapper.NAME.equals(field)) { + if (IdFieldMapper.NAME.equals(field)) { useTermQuery = true; } else if (type.hasDocValues() == false) { throw new IllegalArgumentException("cannot load numeric doc values on " + field); diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index 1945c51d1514f..7a6f581bd7369 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -192,7 +192,7 @@ public void testSerialization() { throw new AssertionError(); }); } - + public void testFlavorParsing() { for (final Build.Flavor flavor : Build.Flavor.values()) { // strict or not should not impact parsing at all here diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 66d7af0a4b20e..e5149b9bce515 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -36,8 +36,6 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_6_3_0; -import static org.elasticsearch.Version.V_7_0_0; import static org.elasticsearch.test.VersionUtils.allVersions; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; @@ -50,30 +48,32 @@ public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_6_3_0.before(V_7_0_0), is(true)); - assertThat(V_6_3_0.before(V_6_3_0), is(false)); - 
assertThat(V_7_0_0.before(V_6_3_0), is(false)); + Version V_7_2_0 = Version.fromString("7.2.0"); + Version V_8_0_0 = Version.fromString("8.0.0"); + assertThat(V_7_2_0.before(V_8_0_0), is(true)); + assertThat(V_7_2_0.before(V_7_2_0), is(false)); + assertThat(V_8_0_0.before(V_7_2_0), is(false)); - assertThat(V_6_3_0.onOrBefore(V_7_0_0), is(true)); - assertThat(V_6_3_0.onOrBefore(V_6_3_0), is(true)); - assertThat(V_7_0_0.onOrBefore(V_6_3_0), is(false)); + assertThat(V_7_2_0.onOrBefore(V_8_0_0), is(true)); + assertThat(V_7_2_0.onOrBefore(V_7_2_0), is(true)); + assertThat(V_8_0_0.onOrBefore(V_7_2_0), is(false)); - assertThat(V_6_3_0.after(V_7_0_0), is(false)); - assertThat(V_6_3_0.after(V_6_3_0), is(false)); - assertThat(V_7_0_0.after(V_6_3_0), is(true)); + assertThat(V_7_2_0.after(V_8_0_0), is(false)); + assertThat(V_7_2_0.after(V_7_2_0), is(false)); + assertThat(V_8_0_0.after(V_7_2_0), is(true)); - assertThat(V_6_3_0.onOrAfter(V_7_0_0), is(false)); - assertThat(V_6_3_0.onOrAfter(V_6_3_0), is(true)); - assertThat(V_7_0_0.onOrAfter(V_6_3_0), is(true)); + assertThat(V_7_2_0.onOrAfter(V_8_0_0), is(false)); + assertThat(V_7_2_0.onOrAfter(V_7_2_0), is(true)); + assertThat(V_8_0_0.onOrAfter(V_7_2_0), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_6_3_0, is(lessThan(V_7_0_0))); - assertThat(V_6_3_0.compareTo(V_6_3_0), is(0)); - assertThat(V_7_0_0, is(greaterThan(V_6_3_0))); + assertThat(V_7_2_0, is(lessThan(V_8_0_0))); + assertThat(V_7_2_0.compareTo(V_7_2_0), is(0)); + assertThat(V_8_0_0, is(greaterThan(V_7_2_0))); } public void testMin() { @@ -182,7 +182,7 @@ public void testMinCompatVersion() { Version major56x = Version.fromString("5.6.0"); assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); - assertThat(Version.V_6_3_1.minimumCompatibilityVersion(), equalTo(major56x)); + assertThat(Version.fromString("6.3.1").minimumCompatibilityVersion(), equalTo(major56x)); // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 
5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java index e532d245ec8e3..a901c8c9bc2e1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.util.Locale; @@ -59,7 +60,7 @@ public void testBwcSerialization() throws Exception { for (int runs = 0; runs < randomIntBetween(5, 20); runs++) { // Generate a random cluster health request in version < 7.2.0 and serializes it final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0))); + out.setVersion(randomVersionBetween(random(), VersionUtils.getFirstVersion(), getPreviousVersion(Version.V_7_2_0))); final ClusterHealthRequest expected = randomRequest(); { @@ -114,7 +115,7 @@ public void testBwcSerialization() throws Exception { // Serialize to node in version < 7.2.0 final BytesStreamOutput out = new BytesStreamOutput(); - out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0))); + out.setVersion(randomVersionBetween(random(), VersionUtils.getFirstVersion(), getPreviousVersion(Version.V_7_2_0))); expected.writeTo(out); // Deserialize and check the cluster health request diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 286e1ce6ee7c5..5835ab6a06c14 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -342,7 +342,7 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { parser.nextToken(); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); @@ -372,7 +372,7 @@ public void testParsePolyWithStoredZ() throws IOException { parser.nextToken(); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); @@ -393,7 +393,7 @@ public void testParseOpenPolygon() throws IOException { parser.nextToken(); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); diff 
--git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java index 5604f4240ce53..c41c242317d9c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplateTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -40,7 +39,7 @@ public void testParseUnknownParam() throws Exception { templateDef.put("random_param", "random_value"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion())); + () -> DynamicTemplate.parse("my_template", templateDef)); assertEquals("Illegal dynamic template parameter: [random_param]", e.getMessage()); } @@ -50,7 +49,7 @@ public void testParseUnknownMatchType() { templateDef2.put("mapping", Collections.singletonMap("store", true)); // if a wrong match type is specified, we ignore the template IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef2, Version.CURRENT.minimumIndexCompatibilityVersion())); + () -> DynamicTemplate.parse("my_template", templateDef2)); assertEquals("No field type matched on [text], possible values are [object, string, long, double, boolean, date, binary]", e.getMessage()); } @@ -63,7 +62,7 @@ public void testParseInvalidRegex() { templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion())); + () -> DynamicTemplate.parse("my_template", templateDef)); assertEquals("Pattern [*a] of type [regex] is invalid. 
Cannot create dynamic template [my_template].", e.getMessage()); } } @@ -72,7 +71,7 @@ public void testMatchAllTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "*"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef); assertTrue(template.match("a.b", "b", randomFrom(XContentFieldType.values()))); } @@ -80,7 +79,7 @@ public void testMatchTypeTemplate() { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef); assertTrue(template.match("a.b", "b", XContentFieldType.STRING)); assertFalse(template.match("a.b", "b", XContentFieldType.BOOLEAN)); } @@ -90,7 +89,7 @@ public void testSerialization() throws Exception { Map templateDef = new HashMap<>(); templateDef.put("match_mapping_type", "string"); templateDef.put("mapping", Collections.singletonMap("store", true)); - DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef); XContentBuilder builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -100,7 +99,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "*name"); templateDef.put("unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + template = DynamicTemplate.parse("my_template", templateDef); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); @@ -110,7 +109,7 @@ public void testSerialization() throws Exception { templateDef.put("path_match", "*name"); templateDef.put("path_unmatch", "first_name"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + template = DynamicTemplate.parse("my_template", templateDef); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", @@ -121,7 +120,7 @@ public void testSerialization() throws Exception { templateDef.put("match", "^a$"); templateDef.put("match_pattern", "regex"); templateDef.put("mapping", Collections.singletonMap("store", true)); - template = DynamicTemplate.parse("my_template", templateDef, Version.CURRENT.minimumIndexCompatibilityVersion()); + template = DynamicTemplate.parse("my_template", templateDef); builder = JsonXContent.contentBuilder(); template.toXContent(builder, ToXContent.EMPTY_PARAMS); 
assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java deleted file mode 100644 index 42d6aa8951c67..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyDynamicMappingTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -public class LegacyDynamicMappingTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testTypeNotCreatedOnIndexFailure() throws IOException { - final Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0).build(); - try (XContentBuilder mapping = jsonBuilder()) { - mapping.startObject(); - { - mapping.startObject("_default_"); - { - mapping.field("dynamic", "strict"); - } - mapping.endObject(); - } - mapping.endObject(); - createIndex("test", settings, "_default_", mapping); - } - try (XContentBuilder sourceBuilder = jsonBuilder().startObject().field("test", "test").endObject()) { - expectThrows(StrictDynamicMappingException.class, () -> client() - .prepareIndex() - .setIndex("test") - .setType("type") - .setSource(sourceBuilder) - .get()); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); - assertNull(getMappingsResponse.getMappings().get("test").get("type")); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java deleted file mode 100644 index 33f9bd51f33db..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/LegacyMapperServiceTests.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; - -public class LegacyMapperServiceTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testIndexMetaDataUpdateDoesNotLoseDefaultMapper() throws IOException { - final IndexService indexService = - createIndex("test", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build()); - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); - { - builder.startObject(MapperService.DEFAULT_MAPPING); - { - builder.field("date_detection", false); - } - builder.endObject(); - } - builder.endObject(); - final PutMappingRequest putMappingRequest = new PutMappingRequest(); - putMappingRequest.indices("test"); - putMappingRequest.type(MapperService.DEFAULT_MAPPING); - putMappingRequest.source(builder); - client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(builder).get(); - } - assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); - final Settings zeroReplicasSettings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); - client().admin().indices().prepareUpdateSettings("test").setSettings(zeroReplicasSettings).get(); - /* - * This assertion is a guard against a previous bug that would lose the default mapper when applying a metadata update that did not - * update the default mapping. 
- */ - assertNotNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); - } - - public void testDefaultMappingIsDeprecatedOn6() throws IOException { - final Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build(); - final String mapping; - try (XContentBuilder defaultMapping = XContentFactory.jsonBuilder()) { - defaultMapping.startObject(); - { - defaultMapping.startObject("_default_"); - { - - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - mapping = Strings.toString(defaultMapping); - } - final MapperService mapperService = createIndex("test", settings).mapperService(); - mapperService.merge("_default_", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); - assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes cannot have more than one type"); - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java deleted file mode 100644 index 13398d8791437..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/similarity/LegacySimilarityTests.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.apache.lucene.search.similarity.LegacyBM25Similarity; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; - -public class LegacySimilarityTests extends ESSingleNodeTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - public void testResolveDefaultSimilaritiesOn6xIndex() { - final Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden - .build(); - final SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); - assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); - assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead."); - assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(LegacyBM25Similarity.class)); - assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); - assertThat(similarityService.getSimilarity("default"), equalTo(null)); - } - - public void testResolveSimilaritiesFromMappingClassic() throws IOException { - try (XContentBuilder mapping = XContentFactory.jsonBuilder()) { - mapping.startObject(); - { - mapping.startObject("type"); - { - mapping.startObject("properties"); - { - mapping.startObject("field1"); - { - mapping.field("type", "text"); - mapping.field("similarity", "my_similarity"); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - - final Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_6_3_0) // otherwise classic is forbidden - .put("index.similarity.my_similarity.type", "classic") - .put("index.similarity.my_similarity.discount_overlaps", false) - .build(); - final MapperService mapperService = createIndex("foo", indexSettings, "type", mapping).mapperService(); - assertThat(mapperService.fullName("field1").similarity().get(), instanceOf(ClassicSimilarity.class)); - - final ClassicSimilarity similarity = (ClassicSimilarity) mapperService.fullName("field1").similarity().get(); - assertThat(similarity.getDiscountOverlaps(), equalTo(false)); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java deleted file mode 100644 index 1bf95f612ce9f..0000000000000 --- a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.mapping; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.not; - -public class LegacyUpdateMappingIntegrationIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - @SuppressWarnings("unchecked") - public void testUpdateDefaultMappingSettings() throws Exception { - logger.info("Creating index with _default_ mappings"); - try (XContentBuilder defaultMapping = JsonXContent.contentBuilder()) { - defaultMapping.startObject(); - { - defaultMapping.startObject(MapperService.DEFAULT_MAPPING); - { - defaultMapping.field("date_detection", false); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - client() - .admin() - .indices() - .prepareCreate("test") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0).build()) - .addMapping(MapperService.DEFAULT_MAPPING, defaultMapping) - .get(); - } - - { - final GetMappingsResponse getResponse = - client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); - final Map defaultMapping = - getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); - assertThat(defaultMapping, hasKey("date_detection")); - } - - logger.info("Emptying _default_ mappings"); - // now remove it - try (XContentBuilder mappingBuilder = - JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING).endObject().endObject()) { - final AcknowledgedResponse putResponse = - client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING) - .setSource(mappingBuilder) - .get(); - assertThat(putResponse.isAcknowledged(), equalTo(true)); - } - logger.info("Done Emptying _default_ mappings"); - - { - final GetMappingsResponse getResponse = - client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); - final Map defaultMapping = - getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); - assertThat(defaultMapping, 
not(hasKey("date_detection"))); - } - - // now test you can change stuff that are normally unchangeable - logger.info("Creating _default_ mappings with an analyzed field"); - try (XContentBuilder defaultMapping = JsonXContent.contentBuilder()) { - - defaultMapping.startObject(); - { - defaultMapping.startObject(MapperService.DEFAULT_MAPPING); - { - defaultMapping.startObject("properties"); - { - defaultMapping.startObject("f"); - { - defaultMapping.field("type", "text"); - defaultMapping.field("index", true); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - } - defaultMapping.endObject(); - - final AcknowledgedResponse putResponse = - client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING).setSource(defaultMapping) - .get(); - assertThat(putResponse.isAcknowledged(), equalTo(true)); - } - - logger.info("Changing _default_ mappings field from analyzed to non-analyzed"); - { - try (XContentBuilder mappingBuilder = JsonXContent.contentBuilder()) { - mappingBuilder.startObject(); - { - mappingBuilder.startObject(MapperService.DEFAULT_MAPPING); - { - mappingBuilder.startObject("properties"); - { - mappingBuilder.startObject("f"); - { - mappingBuilder.field("type", "keyword"); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - - final AcknowledgedResponse putResponse = - client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING) - .setSource(mappingBuilder) - .get(); - assertThat(putResponse.isAcknowledged(), equalTo(true)); - } - } - logger.info("Done changing _default_ mappings field from analyzed to non-analyzed"); - - { - final GetMappingsResponse getResponse = - client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get(); - final Map defaultMapping = - getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); - final Map fieldSettings = (Map) ((Map) defaultMapping.get("properties")).get("f"); - assertThat(fieldSettings, hasEntry("type", "keyword")); - } - - // but we still validate the _default_ type - logger.info("Confirming _default_ mappings validation"); - try (XContentBuilder mappingBuilder = JsonXContent.contentBuilder()) { - - mappingBuilder.startObject(); - { - mappingBuilder.startObject(MapperService.DEFAULT_MAPPING); - { - mappingBuilder.startObject("properites"); - { - mappingBuilder.startObject("f"); - { - mappingBuilder.field("type", "non-existent"); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - } - mappingBuilder.endObject(); - - expectThrows( - MapperParsingException.class, - () -> client() - .admin() - .indices() - .preparePutMapping("test") - .setType(MapperService.DEFAULT_MAPPING) - .setSource(mappingBuilder) - .get()); - } - - } - -} diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index fffa501cc4be4..6128f8d39fcf6 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -440,21 +440,6 @@ public void testInvalidField() throws IOException { } } - public void testToFilterDeprecationMessage() throws IOException { - Directory dir = new RAMDirectory(); - try (IndexWriter writer = new 
IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { - writer.commit(); - } - try (IndexReader reader = DirectoryReader.open(dir)) { - QueryShardContext context = createShardContext(Version.V_6_3_0, reader, "_uid", null, 1,0); - SliceBuilder builder = new SliceBuilder("_uid", 5, 10); - Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); - assertThat(query, instanceOf(TermsSliceQuery.class)); - assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); - assertWarnings("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); - } - } - public void testToFilterWithRouting() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index bf73f2efba42a..8b6f137443a5d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -119,8 +119,7 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); - assertThat(setupSection.getSkipSection().getUpperVersion(), - equalTo(Version.V_6_3_0)); + assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getExecutableSections().size(), equalTo(2)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java index cf94312b6a72b..2420fb68de169 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.license; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,34 +43,14 @@ public boolean isAcknowledged() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - type = in.readString(); - acknowledge = in.readBoolean(); - } else { - type = "trial"; - acknowledge = true; - } + type = in.readString(); + acknowledge = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { - Version version = Version.V_6_3_0; - if (out.getVersion().onOrAfter(version)) { - super.writeTo(out); - out.writeString(type); - out.writeBoolean(acknowledge); - } else { - if ("trial".equals(type) == false) { - throw new IllegalArgumentException("All nodes in cluster must be version [" + version - + "] or newer to start trial with a 
different type than 'trial'. Attempting to write to " + - "a node with version [" + out.getVersion() + "] with trial type [" + type + "]."); - } else if (acknowledge == false) { - throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " + - "prior to [" + version + "]. Attempting to send request to node with version [" + out.getVersion() + "] " + - "without acknowledgement."); - } else { - super.writeTo(out); - } - } + super.writeTo(out); + out.writeString(type); + out.writeBoolean(acknowledge); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java index 93aa923483e79..de995096fc7f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.license; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,11 +17,6 @@ class PostStartTrialResponse extends ActionResponse { - // Nodes Prior to 6.3 did not have NEED_ACKNOWLEDGEMENT as part of status - enum Pre63Status { - UPGRADED_TO_TRIAL, - TRIAL_ALREADY_ACTIVATED; - } enum Status { UPGRADED_TO_TRIAL(true, null, RestStatus.OK), TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN), @@ -76,47 +70,31 @@ public Status getStatus() { @Override public void readFrom(StreamInput in) throws IOException { status = in.readEnum(Status.class); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - acknowledgeMessage = in.readOptionalString(); - int size = in.readVInt(); - Map acknowledgeMessages = new HashMap<>(size); - for (int i = 0; i < size; i++) { - String feature = in.readString(); - int nMessages = in.readVInt(); - String[] messages = new String[nMessages]; - for (int j = 0; j < nMessages; j++) { - messages[j] = in.readString(); - } - acknowledgeMessages.put(feature, messages); + acknowledgeMessage = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); } - this.acknowledgeMessages = acknowledgeMessages; - } else { - this.acknowledgeMessages = Collections.emptyMap(); + acknowledgeMessages.put(feature, messages); } + this.acknowledgeMessages = acknowledgeMessages; } @Override public void writeTo(StreamOutput out) throws IOException { - Version version = Version.V_6_3_0; - if (out.getVersion().onOrAfter(version)) { - out.writeEnum(status); - out.writeOptionalString(acknowledgeMessage); - out.writeVInt(acknowledgeMessages.size()); - for (Map.Entry entry : acknowledgeMessages.entrySet()) { - out.writeString(entry.getKey()); - out.writeVInt(entry.getValue().length); - for (String message : entry.getValue()) { - out.writeString(message); - } - } - } else { - if (status == Status.UPGRADED_TO_TRIAL) { - out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL); - } else if (status == Status.TRIAL_ALREADY_ACTIVATED) { - out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED); - } else { - throw new IllegalArgumentException("Starting trial on node with version [" + 
Version.CURRENT + "] requires " + - "acknowledgement parameter."); + out.writeEnum(status); + out.writeOptionalString(acknowledgeMessage); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index ababc3c21289a..696e3a2871fb3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -194,11 +193,7 @@ public static List nodesNotReadyForXPackCustomMetadata(ClusterSta // check that all nodes would be capable of deserializing newly added x-pack metadata final List notReadyNodes = StreamSupport.stream(clusterState.nodes().spliterator(), false).filter(node -> { final String xpackInstalledAttr = node.getAttributes().getOrDefault(XPACK_INSTALLED_NODE_ATTR, "false"); - - // The node attribute XPACK_INSTALLED_NODE_ATTR was only introduced in 6.3.0, so when - // we have an older node in this mixed-version cluster without any x-pack metadata, - // we want to prevent x-pack from adding custom metadata - return node.getVersion().before(Version.V_6_3_0) || Booleans.parseBoolean(xpackInstalledAttr) == false; + return Booleans.parseBoolean(xpackInstalledAttr) == false; }).collect(Collectors.toList()); return notReadyNodes; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java index beff26eb34d82..ae68b2fdb26ab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -149,20 +148,12 @@ public Response(List scheduledEvents) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } in.readList(ScheduledEvent::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } out.writeList(scheduledEvents); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java index 345c4f1a96db4..0314103a3006b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java 
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -147,10 +146,6 @@ public Response(Calendar calendar) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } calendar = new Calendar(in); } @@ -158,10 +153,6 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } calendar.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java index 448d826973595..1ac325b864536 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -119,20 +118,12 @@ public DatafeedConfig getResponse() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } datafeed = new DatafeedConfig(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } datafeed.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index dc3983644f7b7..2ae19c4f32250 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -148,20 +147,12 @@ public Job getResponse() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } job = new Job(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } job.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java index 316598b6ab505..cae1efb7e7a31 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -172,20 +171,12 @@ public ModelSnapshot getModel() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag was removed - in.readBoolean(); - } model = new ModelSnapshot(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_6_3_0)) { - //the acknowledged flag is no longer supported - out.writeBoolean(true); - } model.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 6ecee409c30f1..3e1b0ea6b3c55 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -93,9 +92,6 @@ public void readFrom(StreamInput in) throws IOException { jobId = in.readString(); update = new JobUpdate(in); isInternal = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.getVersion().before(Version.V_7_0_0)) { - in.readBoolean(); // was waitForAck - } } @Override @@ -104,9 +100,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); update.writeTo(out); out.writeBoolean(isInternal); - if (out.getVersion().onOrAfter(Version.V_6_3_0) && out.getVersion().before(Version.V_7_0_0)) { - out.writeBoolean(false); // was waitForAck - } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 81a0e017c6584..b50b7d2fa5126 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -127,7 +127,7 @@ public JobUpdate(StreamInput in) throws IOException { } customSettings = in.readMap(); modelSnapshotId = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.readBoolean()) { + if (in.readBoolean()) { jobVersion = Version.readVersion(in); } else { jobVersion = null; @@ -166,13 +166,11 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeMap(customSettings); out.writeOptionalString(modelSnapshotId); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - if (jobVersion != null) { - out.writeBoolean(true); - Version.writeVersion(jobVersion, out); - } else { - 
out.writeBoolean(false); - } + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); } if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalBoolean(clearJobFinishTime); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index e1933ef1a59ef..02bef36c00ab0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -336,7 +336,7 @@ public static class Builder { // Stored snapshot documents created prior to 6.3.0 will have no // value for min_version. - private Version minVersion = Version.V_6_3_0; + private Version minVersion = Version.fromString("6.3.0"); private Date timestamp; private String description; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java index a8cf5b895fb35..90728753dd366 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.monitoring; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,9 +26,7 @@ public class MonitoringFeatureSetUsage extends XPackFeatureSet.Usage { public MonitoringFeatureSetUsage(StreamInput in) throws IOException { super(in); exporters = in.readMap(); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - collectionEnabled = in.readOptionalBoolean(); - } + collectionEnabled = in.readOptionalBoolean(); } public MonitoringFeatureSetUsage(boolean available, boolean enabled, @@ -47,9 +44,7 @@ public Map getExporters() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(exporters); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeOptionalBoolean(collectionEnabled); - } + out.writeOptionalBoolean(collectionEnabled); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java index 12192da0bb22f..4fda1971a012f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java @@ -7,7 +7,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -81,10 +80,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); tookInMillis = in.readVLong(); error = in.readOptionalWriteable(Error::new); - - if 
(in.getVersion().onOrAfter(Version.V_6_3_0)) { - ignored = in.readBoolean(); - } + ignored = in.readBoolean(); } @Override @@ -92,10 +88,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(tookInMillis); out.writeOptionalWriteable(error); - - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeBoolean(ignored); - } + out.writeBoolean(ignored); } public static class Error implements Writeable, ToXContentObject { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java index 94306966a34da..4b81b3c288a3c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java @@ -114,6 +114,6 @@ public int hashCode() { @Override public Version getMinimalSupportedVersion() { - return Version.V_6_3_0; + return Version.CURRENT.minimumCompatibilityVersion(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java index 3daf242d5203f..d65dd243ace47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.security.user; -import org.elasticsearch.Version; import org.elasticsearch.xpack.core.security.support.MetadataUtils; /** @@ -15,7 +14,6 @@ public class BeatsSystemUser extends User { public static final String NAME = UsernamesField.BEATS_NAME; public static final String ROLE_NAME = UsernamesField.BEATS_ROLE; - public static final Version DEFINED_SINCE = Version.V_6_3_0; public BeatsSystemUser(boolean enabled) { super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index bc8d7817f4d69..7c599e95cc026 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -242,7 +242,7 @@ public void testSecurityPlatinumExpired() { public void testNewTrialDefaultsSecurityOff() { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); - licenseState.update(TRIAL, true, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)); + licenseState.update(TRIAL, true, VersionUtils.randomCompatibleVersion(random(), Version.CURRENT)); assertThat(licenseState.isSecurityDisabledByLicenseDefaults(), is(true)); assertSecurityNotAllowed(licenseState); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java index 59731cab71db8..ac1fe54f85abd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -48,9 +47,8 @@ public void testNodesNotReadyForXPackCustomMetadata() { DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder(); for (int i = 0; i < randomInt(3); i++) { - final Version version = VersionUtils.randomVersion(random()); final Map attributes; - if (randomBoolean() && version.onOrAfter(Version.V_6_3_0)) { + if (randomBoolean()) { attributes = Collections.singletonMap(XPackPlugin.XPACK_INSTALLED_NODE_ATTR, "true"); } else { nodesCompatible = false; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java index 81d344fd1dd02..802f969d88609 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -144,7 +145,7 @@ public void testStoppedDatafeedConfigs() { public void testUpdateJobForMigration() { Job.Builder oldJob = JobTests.buildJobBuilder("pre-migration"); - Version oldVersion = Version.V_6_3_0; + Version oldVersion = VersionUtils.randomVersion(random()); oldJob.setJobVersion(oldVersion); Job migratedJob = MlConfigMigrator.updateJobForMigration(oldJob.build()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 1065503e091d4..5f1a4050d1f3e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -406,8 +406,7 @@ public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersio Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); - Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), - VersionUtils.getPreviousVersion(Version.V_6_4_0)); + Version version = Version.fromString("6.3.0"); DiscoveryNodes nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, Collections.emptySet(), version)) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java index d644a63e7bcaa..ff545549e80ef 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringFeatureSetTests.java @@ -63,7 +63,6 @@ public void 
testEnabledDefault() { } public void testUsage() throws Exception { - // anything prior to 6.3 does not include collection_enabled (so defaults it to null) final Version serializedVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); final boolean collectionEnabled = randomBoolean(); int localCount = randomIntBetween(0, 5); @@ -116,11 +115,7 @@ public void testUsage() throws Exception { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); source = ObjectPath.createFromXContent(builder.contentType().xContent(), BytesReference.bytes(builder)); } - if (usage == monitoringUsage || serializedVersion.onOrAfter(Version.V_6_3_0)) { - assertThat(source.evaluate("collection_enabled"), is(collectionEnabled)); - } else { - assertThat(source.evaluate("collection_enabled"), is(nullValue())); - } + assertThat(source.evaluate("collection_enabled"), is(collectionEnabled)); assertThat(source.evaluate("enabled_exporters"), is(notNullValue())); if (localCount > 0) { assertThat(source.evaluate("enabled_exporters.local"), is(localCount)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java index 3ac7f2de63c5e..901025ff2c444 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java @@ -79,12 +79,7 @@ public void testSerialization() throws IOException { } else { assertThat(response2.getError(), is(notNullValue())); } - - if (version.onOrAfter(Version.V_6_3_0)) { - assertThat(response2.isIgnored(), is(response.isIgnored())); - } else { - assertThat(response2.isIgnored(), is(false)); - } + assertThat(response2.isIgnored(), is(response.isIgnored())); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 7e498efa4df2e..ac6248f4f30d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -7,7 +7,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSetting; @@ -199,10 +198,7 @@ public void users(ActionListener> listener) { private void getUserInfo(final String username, ActionListener listener) { - if (userIsDefinedForCurrentSecurityMapping(username) == false) { - logger.debug("Marking user [{}] as disabled because the security mapping is not at the required version", username); - listener.onResponse(disabledDefaultUserInfo.deepClone()); - } else if (securityIndex.indexExists() == false) { + if (securityIndex.indexExists() == false) { listener.onResponse(getDefaultUserInfo(username)); } else { nativeUsersStore.getReservedUserInfo(username, ActionListener.wrap((userInfo) -> { @@ -227,24 +223,6 @@ private ReservedUserInfo getDefaultUserInfo(String username) { } } - private boolean userIsDefinedForCurrentSecurityMapping(String username) 
{ - final Version requiredVersion = getDefinedVersion(username); - return securityIndex.checkMappingVersion(requiredVersion::onOrBefore); - } - - private Version getDefinedVersion(String username) { - switch (username) { - case BeatsSystemUser.NAME: - return BeatsSystemUser.DEFINED_SINCE; - case APMSystemUser.NAME: - return APMSystemUser.DEFINED_SINCE; - case RemoteMonitoringUser.NAME: - return RemoteMonitoringUser.DEFINED_SINCE; - default: - return Version.CURRENT.minimumIndexCompatibilityVersion(); - } - } - public static void addSettings(List> settingsList) { settingsList.add(BOOTSTRAP_ELASTIC_PASSWORD); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 0d48cd6b856e1..b62cb44ac028c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -130,12 +130,6 @@ public SecurityIndexManager freeze() { return new SecurityIndexManager(null, aliasName, internalIndexName, internalIndexFormat, mappingSourceSupplier, indexState); } - public boolean checkMappingVersion(Predicate requiredVersion) { - // pull value into local variable for consistent view - final State currentIndexState = this.indexState; - return currentIndexState.mappingVersion == null || requiredVersion.test(currentIndexState.mappingVersion); - } - public String aliasName() { return aliasName; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index 33cec72060886..3245c064ef07f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -158,7 +158,6 @@ public void testReservedUsersOnly() { NativeUsersStore usersStore = mock(NativeUsersStore.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); when(securityIndex.isAvailable()).thenReturn(true); - when(securityIndex.checkMappingVersion(any())).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm reservedRealm = diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index ea1b6483fd795..42efeebf03f19 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -32,14 +32,12 @@ import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ExecutionException; -import java.util.function.Predicate; import static 
org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -72,7 +70,6 @@ public void setupMocks() throws Exception { usersStore = mock(NativeUsersStore.class); securityIndex = mock(SecurityIndexManager.class); when(securityIndex.isAvailable()).thenReturn(true); - when(securityIndex.checkMappingVersion(any())).thenReturn(true); mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); @@ -164,8 +161,6 @@ private void verifySuccessfulAuthentication(boolean enabled) throws Exception { verify(securityIndex, times(2)).indexExists(); verify(usersStore, times(2)).getReservedUserInfo(eq(principal), any(ActionListener.class)); - final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); - verify(securityIndex, times(2)).checkMappingVersion(predicateCaptor.capture()); verifyNoMoreInteractions(usersStore); } @@ -182,9 +177,6 @@ public void testLookup() throws Exception { assertEquals(expectedUser, user); verify(securityIndex).indexExists(); - final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); - verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - PlainActionFuture future = new PlainActionFuture<>(); reservedRealm.doLookupUser("foobar", future); final User doesntExist = future.actionGet(); @@ -229,9 +221,6 @@ public void testLookupThrows() throws Exception { verify(securityIndex).indexExists(); verify(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); - final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); - verify(securityIndex).checkMappingVersion(predicateCaptor.capture()); - verifyNoMoreInteractions(usersStore); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 6e7a9806781b5..157e0ffb82013 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -394,14 +394,6 @@ private static String loadTemplate(String templateName) { return TemplateUtils.loadTemplate(resource, Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN); } - public void testMappingVersionMatching() throws IOException { - String templateString = "/" + SECURITY_MAIN_TEMPLATE_7 + ".json"; - ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString); - manager.clusterChanged(new ClusterChangedEvent("test-event", clusterStateBuilder.build(), EMPTY_CLUSTER_STATE)); - assertTrue(manager.checkMappingVersion(Version.CURRENT.minimumIndexCompatibilityVersion()::before)); - assertFalse(manager.checkMappingVersion(Version.CURRENT.minimumIndexCompatibilityVersion()::after)); - } - public void testMissingVersionMappingThrowsError() throws IOException { String templateString = "/missing-version-security-index-template.json"; ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 
a62a23dac70b8..3ac0b20f95d0f 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -203,7 +203,6 @@ public void testWatcher() throws Exception { * Tests that a RollUp job created on a old cluster is correctly restarted after the upgrade. */ public void testRollupAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); if (isRunningAgainstOldCluster()) { final int numDocs = 59; final int year = randomIntBetween(1970, 2018); From d992b1da009be22cbcc25b13aadc0e56a77b9d89 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Fri, 24 May 2019 16:48:45 +0200 Subject: [PATCH 251/321] Shard CLI tool always check shards (#41480) The shard CLI tool would not do anything if a corruption marker was not present. But a corruption marker is only added if a corruption is detected during indexing/writing, not if a search or other read fails. Changed the tool to always check shards regardless of corruption marker presence. Related to #41298 --- .../RemoveCorruptedLuceneSegmentsAction.java | 17 +--- .../RemoveCorruptedShardDataCommandTests.java | 90 +++++++++++++++---- 2 files changed, 79 insertions(+), 28 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java index b4b59872758ed..da0257c19e334 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedLuceneSegmentsAction.java @@ -38,9 +38,7 @@ public Tuple getCleanStatus Lock writeLock, PrintStream printStream, boolean verbose) throws IOException { - if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(indexDirectory) == false) { - return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null); - } + boolean markedCorrupted = RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(indexDirectory); final CheckIndex.Status status; try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) { @@ -55,7 +53,9 @@ public Tuple getCleanStatus } return status.clean - ? Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN_WITH_CORRUPTED_MARKER, null) + ? Tuple.tuple(markedCorrupted + ? 
RemoveCorruptedShardDataCommand.CleanStatus.CLEAN_WITH_CORRUPTED_MARKER + : RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null) : Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CORRUPTED, "Corrupted Lucene index segments found - " + status.totLoseDocCount + " documents will be lost."); } @@ -67,8 +67,6 @@ public void execute(Terminal terminal, Lock writeLock, PrintStream printStream, boolean verbose) throws IOException { - checkCorruptMarkerFileIsPresent(indexDirectory); - final CheckIndex.Status status; try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) { @@ -90,11 +88,4 @@ public void execute(Terminal terminal, } } } - - protected void checkCorruptMarkerFileIsPresent(Directory directory) throws IOException { - if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(directory) == false) { - throw new ElasticsearchException("There is no corruption file marker"); - } - } - } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index c9a7b236d9c8f..c7b1846356363 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -76,6 +76,9 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase { private Path translogPath; private Path indexPath; + private static final Pattern NUM_CORRUPT_DOCS_PATTERN = + Pattern.compile("Corrupted Lucene index segments found -\\s+(?\\d+) documents will be lost."); + @Before public void setup() throws IOException { shardId = new ShardId("index0", "_na_", 0); @@ -154,11 +157,13 @@ public void testCorruptedIndex() throws Exception { final boolean corruptSegments = randomBoolean(); CorruptionUtils.corruptIndex(random(), indexPath, corruptSegments); - // test corrupted shard - final IndexShard corruptedShard = reopenIndexShard(true); - allowShardFailures(); - expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); - closeShards(corruptedShard); + if (randomBoolean()) { + // test corrupted shard and add corruption marker + final IndexShard corruptedShard = reopenIndexShard(true); + allowShardFailures(); + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + closeShards(corruptedShard); + } final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final MockTerminal t = new MockTerminal(); @@ -196,8 +201,7 @@ public void testCorruptedIndex() throws Exception { final Set shardDocUIDs = getShardDocUIDs(newShard); - final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?\\d+) documents will be lost."); - final Matcher matcher = pattern.matcher(output); + final Matcher matcher = NUM_CORRUPT_DOCS_PATTERN.matcher(output); assertThat(matcher.find(), equalTo(true)); final int expectedNumDocs = numDocs - Integer.parseInt(matcher.group("docs")); @@ -272,12 +276,13 @@ public void testCorruptedBothIndexAndTranslog() throws Exception { CorruptionUtils.corruptIndex(random(), indexPath, false); - // test corrupted shard - final IndexShard corruptedShard = reopenIndexShard(true); - allowShardFailures(); - expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); - closeShards(corruptedShard); - + if (randomBoolean()) { + // test corrupted shard 
and add corruption marker + final IndexShard corruptedShard = reopenIndexShard(true); + allowShardFailures(); + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + closeShards(corruptedShard); + } TestTranslog.corruptRandomTranslogFile(logger, random(), Arrays.asList(translogPath)); final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); @@ -313,8 +318,7 @@ public void testCorruptedBothIndexAndTranslog() throws Exception { final Set shardDocUIDs = getShardDocUIDs(newShard); - final Pattern pattern = Pattern.compile("Corrupted Lucene index segments found -\\s+(?\\d+) documents will be lost."); - final Matcher matcher = pattern.matcher(output); + final Matcher matcher = NUM_CORRUPT_DOCS_PATTERN.matcher(output); assertThat(matcher.find(), equalTo(true)); final int expectedNumDocs = numDocsToKeep - Integer.parseInt(matcher.group("docs")); @@ -347,6 +351,62 @@ public void testResolveIndexDirectory() throws Exception { shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); } + public void testCleanWithCorruptionMarker() throws Exception { + // index some docs in several segments + final int numDocs = indexDocs(indexShard, true); + + indexShard.store().markStoreCorrupted(null); + + closeShards(indexShard); + + allowShardFailures(); + final IndexShard corruptedShard = reopenIndexShard(true); + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + closeShards(corruptedShard); + + final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); + final MockTerminal t = new MockTerminal(); + final OptionParser parser = command.getParser(); + + final OptionSet options = parser.parse("-d", translogPath.toString()); + // run command with dry-run + t.addTextInput("n"); // mean dry run + t.addTextInput("n"); // mean dry run + t.setVerbosity(Terminal.Verbosity.VERBOSE); + try { + command.execute(t, options, environment); + fail(); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), containsString("aborted by user")); + assertThat(t.getOutput(), containsString("Continue and remove corrupted data from the shard ?")); + assertThat(t.getOutput(), containsString("Lucene index is marked corrupted, but no corruption detected")); + } + + logger.info("--> output:\n{}", t.getOutput()); + + // run command without dry-run + t.reset(); + t.addTextInput("y"); + t.addTextInput("y"); + command.execute(t, options, environment); + + final String output = t.getOutput(); + logger.info("--> output:\n{}", output); + + failOnShardFailures(); + final IndexShard newShard = newStartedShard(p -> reopenIndexShard(false), true); + + final Set shardDocUIDs = getShardDocUIDs(newShard); + assertEquals(numDocs, shardDocUIDs.size()); + + assertThat(t.getOutput(), containsString("This shard has been marked as corrupted but no corruption can now be detected.")); + + final Matcher matcher = NUM_CORRUPT_DOCS_PATTERN.matcher(output); + assertFalse(matcher.find()); + + closeShards(newShard); + } + private IndexShard reopenIndexShard(boolean corrupted) throws IOException { // open shard with the same location final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), From 43848fc40e4cb3b3c965577ee4088c0f8707c0a2 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 24 May 2019 17:18:53 +0200 Subject: [PATCH 252/321] Wipe repositories more often (#42511) Fixes an issue where repositories are unintentionally shared among tests 
(given that the repo contents are captured in a static variable on the test class, to allow "sharing" among nodes) and two tests randomly chose the same snapshot name, leading to a conflict. Closes #42519 --- .../gcs/GoogleCloudStorageBlobStoreRepositoryTests.java | 6 +++--- .../repositories/s3/S3BlobStoreRepositoryTests.java | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index db166a228b576..0e3ecde69c4f0 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.junit.AfterClass; +import org.junit.After; import java.util.Collection; import java.util.Collections; @@ -67,8 +67,8 @@ protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(GoogleCloudStorageRepository.class)); } - @AfterClass - public static void wipeRepository() { + @After + public void wipeRepository() { blobs.clear(); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 61c0328e516b7..e94ea5ef6c9c3 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; -import org.junit.AfterClass; +import org.junit.After; import org.junit.BeforeClass; import java.util.Collection; @@ -78,8 +78,8 @@ public static void setUpRepositorySettings() { } } - @AfterClass - public static void wipeRepository() { + @After + public void wipeRepository() { blobs.clear(); } From cd324a1b3ae4e68eea3a98d764738be4c3c4eb56 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 11:19:41 -0400 Subject: [PATCH 253/321] Add test to verify force primary allocation on closed indices (#42458) This change adds a test verifying that we can force primary allocation on closed indices.
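A minimal sketch of the scenario this test exercises (the index name, shard id, and node name below are placeholders, not code quoted from this patch):

    assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(0));
    client().admin().cluster().prepareReroute()
        .add(new AllocateStalePrimaryAllocationCommand("test", 0, "node1", true)) // true = accept data loss
        .get();

Closing the index first is the new randomized branch added here; the reroute with a stale-primary command is the existing force-allocation path that must keep working on the closed index.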
--- .../cluster/routing/PrimaryAllocationIT.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 00a2f5e34a791..0e6b24c45d169 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -23,9 +23,11 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -65,6 +67,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -231,7 +234,9 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { Set historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); createStaleReplicaScenario(master); - + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(0)); + } boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final String idxName = "test"; @@ -281,15 +286,18 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get() .getState().routingTable().index(idxName).allPrimaryShardsActive())); } - assertHitCount(client().prepareSearch(idxName).setSize(0).setQuery(matchAllQuery()).get(), useStaleReplica ? 1L : 0L); - + ShardStats[] shardStats = client().admin().indices().prepareStats("test") + .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED).get().getShards(); + for (ShardStats shardStat : shardStats) { + assertThat(shardStat.getCommitStats().getNumDocs(), equalTo(useStaleReplica ? 
1 : 0)); + } // allocation id of old primary was cleaned from the in-sync set final ClusterState state = client().admin().cluster().prepareState().get().getState(); assertEquals(Collections.singleton(state.routingTable().index(idxName).shard(0).primary.allocationId().getId()), state.metaData().index(idxName).inSyncAllocationIds(0)); - Set newHistoryUUIds = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) + Set newHistoryUUIds = Stream.of(shardStats) .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); assertThat(newHistoryUUIds, everyItem(not(isIn(historyUUIDs)))); assertThat(newHistoryUUIds, hasSize(1)); From dfc3b8e416f35b4d2ea1348f362614ff2ecc4b08 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 24 May 2019 09:00:38 -0700 Subject: [PATCH 254/321] [DOCS] Removes X-Pack setup (#42481) --- docs/reference/index.asciidoc | 2 -- docs/reference/redirects.asciidoc | 8 ++++++++ docs/reference/setup/setup-xes.asciidoc | 18 ------------------ 3 files changed, 8 insertions(+), 20 deletions(-) delete mode 100644 docs/reference/setup/setup-xes.asciidoc diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 5ed2a93e4bae4..64f5b57d57c17 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -14,8 +14,6 @@ include::getting-started.asciidoc[] include::setup.asciidoc[] -include::setup/setup-xes.asciidoc[] - include::monitoring/configuring-monitoring.asciidoc[] include::{xes-repo-dir}/security/configuring-es.asciidoc[] diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index b5f0e08a45232..9a8a0c20bb272 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -595,3 +595,11 @@ See <>. [role="exclude",id="_faster_prefix_queries_with_literal_index_prefixes_literal.html"] See <>. + +[role="exclude",id="setup-xpack"] +=== Set up {xpack} + +{xpack} is an Elastic Stack extension that provides security, alerting, +monitoring, reporting, machine learning, and many other capabilities. By default, +when you install {es}, {xpack} is installed. + diff --git a/docs/reference/setup/setup-xes.asciidoc b/docs/reference/setup/setup-xes.asciidoc deleted file mode 100644 index 55c1fe8bf42f6..0000000000000 --- a/docs/reference/setup/setup-xes.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -[role="xpack"] -[[setup-xpack]] -== Set up {xpack} - -{xpack} is an Elastic Stack extension that provides security, alerting, -monitoring, reporting, machine learning, and many other capabilities. By default, -when you install {es}, {xpack} is installed. - -If you want to try all of the {xpack} features, you can -{stack-ov}/license-management.html[start a 30-day trial]. At the end of the -trial period, you can purchase a subscription to keep using the full -functionality of the {xpack} components. For more information, see -https://www.elastic.co/subscriptions. 
- -* <> -* <> -* <> -* <> From ffa5461b7f6bbc5d6587fd181591b710bd53a543 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 24 May 2019 09:31:24 -0700 Subject: [PATCH 255/321] [DOCS] Removes X-Pack Java client configuration (#42480) --- docs/reference/index.asciidoc | 2 - docs/reference/redirects.asciidoc | 8 ++ docs/reference/setup/setup-xclient.asciidoc | 113 -------------------- 3 files changed, 8 insertions(+), 115 deletions(-) delete mode 100644 docs/reference/setup/setup-xclient.asciidoc diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 64f5b57d57c17..8e5fea810fb7b 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -18,8 +18,6 @@ include::monitoring/configuring-monitoring.asciidoc[] include::{xes-repo-dir}/security/configuring-es.asciidoc[] -include::setup/setup-xclient.asciidoc[] - include::setup/bootstrap-checks-xes.asciidoc[] :edit_url: diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 9a8a0c20bb272..6f68d781f4856 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -603,3 +603,11 @@ See <>. monitoring, reporting, machine learning, and many other capabilities. By default, when you install {es}, {xpack} is installed. +[role="exclude",id="setup-xpack-client"] +=== Configuring {xpack} Java Clients + +The `TransportClient` is deprecated in favour of the +{java-rest}/java-rest-high.html[Java High Level REST Client] and was removed in +Elasticsearch 8.0. The +{java-rest}/java-rest-high-level-migration.html[migration guide] describes all +the steps needed to migrate. \ No newline at end of file diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc deleted file mode 100644 index a192aeb6ea39a..0000000000000 --- a/docs/reference/setup/setup-xclient.asciidoc +++ /dev/null @@ -1,113 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[setup-xpack-client]] -== Configuring {xpack} Java Clients - -deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] - -If you want to use a Java {javaclient}/transport-client.html[transport client] with a -cluster where {xpack} is installed, then you must download and configure the -{xpack} transport client. - -. Add the {xpack} transport JAR file to your *CLASSPATH*. You can download the {xpack} -distribution and extract the JAR file manually or you can get it from the -https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearch Maven repository]. -As with any dependency, you will also need its transitive dependencies. Refer to the -https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.pom[X-Pack POM file -for your version] when downloading for offline usage. - -. If you are using Maven, you need to add the {xpack} JAR file as a dependency in -your project's `pom.xml` file: -+ --- -[source,xml] --------------------------------------------------------------- - - - - - elasticsearch-releases - https://artifacts.elastic.co/maven - - true - - - false - - - ... - - ... - - - - - org.elasticsearch.client - x-pack-transport - {version} - - ... - - ... - - --------------------------------------------------------------- --- - -. 
If you are using Gradle, you need to add the {xpack} JAR file as a dependency in -your `build.gradle` file: -+ --- -[source,groovy] --------------------------------------------------------------- -repositories { - /* ... Any other repositories ... */ - - // Add the Elasticsearch Maven Repository - maven { - name "elastic" - url "https://artifacts.elastic.co/maven" - } -} - -dependencies { - compile "org.elasticsearch.client:x-pack-transport:{version}" - - /* ... */ -} --------------------------------------------------------------- --- - -. If you are using a repository manager such as https://www.sonatype.com/nexus-repository-oss[Nexus OSS] within your -company, you need to add the repository as per the following screenshot: -+ --- -image::security/images/nexus.png["Adding the Elastic repo in Nexus",link="images/nexus.png"] - -Then in your project's `pom.xml` if using maven, add the following repositories and dependencies definitions: - -[source,xml] --------------------------------------------------------------- - - - org.elasticsearch.client - x-pack-transport - {version} - - - - - - local-nexus - Elastic Local Nexus - http://0.0.0.0:8081/repository/elasticsearch/ - - true - - - false - - - --------------------------------------------------------------- --- From a92f3504749dafed90942722ae4e59cf55d1f527 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 24 May 2019 13:16:48 -0400 Subject: [PATCH 256/321] SQL: Moves the JTS-based tests suppression to Before (#42526) Moves the test suppression from `ClassRule` to `Before`, where it is properly handled in the CI build. Fixes #42221 --- .../xpack/sql/qa/geo/GeoSqlSpecTestCase.java | 4 +-- .../sql/qa/src/main/resources/geo/geo.csv | 30 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java index ec97cab6f10b1..025b04d66ce95 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/geo/GeoSqlSpecTestCase.java @@ -31,8 +31,6 @@ public abstract class GeoSqlSpecTestCase extends SpecBaseIntegrationTestCase { @ClassRule public static LocalH2 H2 = new LocalH2((c) -> { - assumeTrue("JTS inside H2 is using default local for toUpperCase() in string comparison making it fail to parse WKT on certain" + - " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); // Load GIS extensions H2GISFunctions.load(c); c.createStatement().execute("RUNSCRIPT FROM 'classpath:/ogc/sqltsch.sql'"); @@ -52,6 +50,8 @@ public static List readScriptSpec() throws Exception { public void setupTestGeoDataIfNeeded() throws Exception { assumeTrue("Cannot support locales that don't use Hindu-Arabic numerals and non-ascii - sign due to H2", "-42".equals(NumberFormat.getInstance(Locale.getDefault()).format(-42))); + assumeTrue("JTS inside H2 is using default local for toUpperCase() in string comparison making it fail to parse WKT on certain" + + " locales", "point".toUpperCase(Locale.getDefault()).equals("POINT")); if (client().performRequest(new Request("HEAD", "/ogc")).getStatusLine().getStatusCode() == 404) { GeoDataLoader.loadOGCDatasetIntoEs(client(), "ogc"); } diff --git a/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv b/x-pack/plugin/sql/qa/src/main/resources/geo/geo.csv index 8275bd7c884ef..d21ea71c5b949 100644 --- 
From 1b6dc178388c4a4163fb908419c20a580e23b46a Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Fri, 24 May 2019 10:30:06 -0700
Subject: [PATCH 257/321] Remove transport client from tests (#42457)

This commit removes testing infrastructure for using the transport
client.
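The shape of the surviving tests is worth spelling out once, since every hunk
below repeats it: a test declares its plugins a single time via
`nodePlugins()`, and `client()` always returns a node client bound to the
internal test cluster. A hedged sketch of that shape; the plugin and test
names are placeholders, not taken from this patch:

[source,java]
--------------------------------------------------------------
import java.util.Collection;
import java.util.Collections;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

public class MyPluginIT extends ESIntegTestCase {

    // Placeholder standing in for whatever plugin a real test installs.
    public static class MyPlugin extends Plugin {
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Collections.singleton(MyPlugin.class);
    }

    // No transportClientPlugins()/transportClientSettings() overrides and no
    // transportClientRatio in @ClusterScope are needed any more: client()
    // always hands back a node client.
    public void testClusterIsReachable() {
        assertFalse(client().admin().cluster().prepareHealth().get().isTimedOut());
    }
}
--------------------------------------------------------------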
--- .../mustache/MultiSearchTemplateIT.java | 5 - .../join/query/ParentChildTestCase.java | 5 - .../index/rankeval/RankEvalRequestIT.java | 5 - .../documentation/ReindexDocumentationIT.java | 5 - .../index/reindex/ReindexTestCase.java | 5 - .../index/reindex/RetryTests.java | 7 - .../elasticsearch/ESNetty4IntegTestCase.java | 13 - .../netty4/Netty4HttpRequestSizeLimitIT.java | 2 +- .../rest/discovery/Zen2RestApiIT.java | 2 +- .../azure/classic/AzureSimpleTests.java | 1 - .../classic/AzureTwoStartedNodesTests.java | 1 - .../ec2/Ec2DiscoveryUpdateSettingsTests.java | 2 +- .../org/elasticsearch/NioIntegTestCase.java | 13 - .../http/ContextAndHeaderTransportIT.java | 347 -------------- .../elasticsearch/http/HttpSmokeTestCase.java | 13 - .../admin/cluster/node/tasks/TasksIT.java | 7 +- .../admin/indices/exists/IndicesExistsIT.java | 3 +- .../transport/FailAndRetryMockTransport.java | 235 ---------- .../client/transport/NodeDisconnectIT.java | 83 ---- .../TransportClientHeadersTests.java | 190 -------- .../client/transport/TransportClientIT.java | 108 ----- .../TransportClientNodesServiceTests.java | 441 ------------------ .../transport/TransportClientRetryIT.java | 85 ---- .../transport/TransportClientTests.java | 121 ----- .../elasticsearch/cluster/ClusterStateIT.java | 350 -------------- .../coordination/RareClusterStateIT.java | 2 +- .../ClusterDisruptionCleanSettingsIT.java | 2 +- .../discovery/ClusterDisruptionIT.java | 2 +- .../discovery/DiscoveryDisruptionIT.java | 2 +- .../discovery/MasterDisruptionIT.java | 2 +- .../discovery/SnapshotDisruptionIT.java | 2 +- .../discovery/StableMasterDisruptionIT.java | 2 +- .../single/SingleNodeDiscoveryIT.java | 4 +- .../query/plugin/CustomQueryParserIT.java | 5 - .../index/store/ExceptionRetryIT.java | 2 +- .../indices/settings/InternalSettingsIT.java | 5 - .../indices/settings/PrivateSettingsIT.java | 5 - .../PersistentTasksExecutorFullRestartIT.java | 5 - .../persistent/PersistentTasksExecutorIT.java | 5 - .../decider/EnableAssignmentDeciderIT.java | 5 - .../recovery/FullRollingRestartIT.java | 2 +- .../SignificantTermsSignificanceScoreIT.java | 5 - .../search/fetch/FetchSubPhasePluginIT.java | 5 - .../functionscore/FunctionScorePluginIT.java | 5 - .../search/scroll/SearchScrollIT.java | 6 +- .../DedicatedClusterSnapshotRestoreIT.java | 2 +- .../snapshots/SnapshotShardsServiceIT.java | 2 +- .../threadpool/SimpleThreadPoolIT.java | 3 +- .../ConcurrentSeqNoVersioningIT.java | 3 +- .../elasticsearch/test/ESIntegTestCase.java | 127 +---- .../test/InternalTestCluster.java | 110 +---- .../org/elasticsearch/test/TestCluster.java | 7 +- .../test/test/InternalTestClusterTests.java | 14 +- .../elasticsearch/xpack/CcrIntegTestCase.java | 4 +- .../xpack/ccr/CcrDisabledIT.java | 11 - .../AbstractLicensesIntegrationTestCase.java | 13 - .../license/LicenseServiceClusterTests.java | 7 +- .../license/StartBasicLicenseTests.java | 8 +- .../license/StartTrialLicenseTests.java | 8 +- .../snapshots/SourceOnlySnapshotIT.java | 2 +- .../IndexLifecycleInitialisationTests.java | 2 +- .../xpack/ml/support/BaseMlIntegTestCase.java | 19 +- .../monitoring/MultiNodesStatsTests.java | 2 +- .../AbstractIndicesCleanerTestCase.java | 2 +- .../exporter/http/HttpExporterIT.java | 2 +- .../exporter/http/HttpExporterSslIT.java | 2 +- .../local/LocalExporterIntegTests.java | 2 +- .../LocalExporterResourceIntegTests.java | 2 +- .../test/MonitoringIntegTestCase.java | 16 - .../DocumentLevelSecurityTests.java | 5 - .../integration/FieldLevelSecurityTests.java | 8 - 
.../ShrinkIndexWithSecurityTests.java | 2 +- .../test/SecurityIntegTestCase.java | 3 +- .../SecurityServerTransportServiceTests.java | 9 - .../xpack/security/TemplateUpgraderTests.java | 2 +- .../AuditTrailSettingsUpdateTests.java | 2 +- .../filter/IpFilteringIntegrationTests.java | 2 +- .../filter/IpFilteringUpdateTests.java | 2 +- .../transport/ssl/EllipticCurveSSLTests.java | 15 - .../xpack/ssl/SSLTrustRestrictionsTests.java | 2 +- .../sql/action/AbstractSqlIntegTestCase.java | 7 +- .../xpack/sql/action/SqlDisabledIT.java | 8 - .../AbstractWatcherIntegrationTestCase.java | 18 +- .../test/integration/SingleNodeTests.java | 2 +- 84 files changed, 78 insertions(+), 2501 deletions(-) delete mode 100644 qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java delete mode 100644 server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java delete mode 100644 server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index 0463069609d4c..d7cdea7bd73f9 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -48,11 +48,6 @@ protected Collection> nodePlugins() { return Collections.singleton(MustachePlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - public void testBasic() throws Exception { createIndex("msearch"); final int numDocs = randomIntBetween(10, 100); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java index 87b16bc448ef1..40d46a88fe2a6 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -50,11 +50,6 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override public Settings indexSettings() { Settings.Builder builder = Settings.builder().put(super.indexSettings()) diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 9fb10251f6325..a132ee5cb5938 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ 
b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -51,11 +51,6 @@ public class RankEvalRequestIT extends ESIntegTestCase { private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(RankEvalPlugin.class); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(RankEvalPlugin.class); diff --git a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 8def6dbb40316..7667fbbcf89f8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -78,11 +78,6 @@ protected Collection> nodePlugins() { return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(ReindexPlugin.class); - } - @Before public void setup() { client().admin().indices().prepareCreate(INDEX_NAME).get(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java index 01b5539a23c48..2b53f2842f164 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java @@ -41,11 +41,6 @@ protected Collection> nodePlugins() { return Arrays.asList(ReindexPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(ReindexPlugin.class); - } - protected ReindexRequestBuilder reindex() { return new ReindexRequestBuilder(client(), ReindexAction.INSTANCE); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index 916c18e38f7f5..4a0813a6a7486 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -75,13 +75,6 @@ protected Collection> nodePlugins() { Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList( - ReindexPlugin.class, - Netty4Plugin.class); - } - /** * Lower the queue sizes to be small enough that both bulk and searches will time out and have to be retried. 
*/ diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java index b38cda76c6980..9d8baf9e3f871 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -52,21 +52,8 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - @Override - protected Settings transportClientSettings() { - Settings.Builder builder = Settings.builder().put(super.transportClientSettings()); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME); - return builder.build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(Netty4Plugin.class); } - - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(Netty4Plugin.class); - } - } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index 52732d5bc1df4..e45f6de92d5fe 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -45,7 +45,7 @@ * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing * a single node "cluster". We also force test infrastructure to use the node client instead of the transport client for the same reason. */ -@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1, transportClientRatio = 0) +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1) public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 83d4c3419ef64..fcb8e75700d0c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -47,7 +47,7 @@ // These tests are here today so they have access to a proper REST client. They cannot be in :server:integTest since the REST client needs a // proper transport implementation, and they cannot be REST tests today since they need to restart nodes. When #35599 and friends land we // should be able to move these tests to run against a proper cluster instead. TODO do this. 
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) public class Zen2RestApiIT extends ESNetty4IntegTestCase { @Override diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index 1a36e6c55bd0e..3c174de172e2a 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -29,7 +29,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, - transportClientRatio = 0.0, numClientNodes = 0) public class AzureSimpleTests extends AbstractAzureComputeServiceTestCase { diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index c454569b7f260..79fced1801c15 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -27,7 +27,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, - transportClientRatio = 0.0, numClientNodes = 0) public class AzureTwoStartedNodesTests extends AbstractAzureComputeServiceTestCase { diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java index 9802479fe84d3..f9d576874c510 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java @@ -34,7 +34,7 @@ * starting. * This test requires AWS to run. 
*/ -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class Ec2DiscoveryUpdateSettingsTests extends AbstractAwsTestCase { public void testMinimumMasterNodesStart() { Settings nodeSettings = Settings.builder() diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java index 9ed64213cbf5f..6de96d17fe239 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -52,21 +52,8 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - @Override - protected Settings transportClientSettings() { - Settings.Builder builder = Settings.builder().put(super.transportClientSettings()); - builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); - return builder.build(); - } - @Override protected Collection> nodePlugins() { return Collections.singletonList(NioTransportPlugin.class); } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(NioTransportPlugin.class); - } - } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java deleted file mode 100644 index 47cce87c4b959..0000000000000 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.http; - -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.ActionFilter; -import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.GeoShapeQueryBuilder; -import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; -import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.TermsQueryBuilder; -import org.elasticsearch.indices.TermsLookup; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; - -import static java.util.Collections.singletonList; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -@ClusterScope(scope = SUITE) -public class ContextAndHeaderTransportIT extends HttpSmokeTestCase { - private static final List requests = new CopyOnWriteArrayList<>(); - private static final String CUSTOM_HEADER = "SomeCustomHeader"; - private String randomHeaderValue = randomAlphaOfLength(20); - private String queryIndex = "query-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - private String lookupIndex = "lookup-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - 
.build(); - } - - @Override - protected Collection> nodePlugins() { - ArrayList> plugins = new ArrayList<>(super.nodePlugins()); - plugins.add(ActionLoggingPlugin.class); - plugins.add(CustomHeadersPlugin.class); - return plugins; - } - - @Before - public void createIndices() throws Exception { - String mapping = Strings.toString(jsonBuilder().startObject().startObject("type") - .startObject("properties") - .startObject("location").field("type", "geo_shape").endObject() - .startObject("name").field("type", "text").endObject() - .endObject() - .endObject().endObject()); - - Settings settings = Settings.builder() - .put(indexSettings()) - .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable. - .build(); - assertAcked(transportClient().admin().indices().prepareCreate(lookupIndex) - .setSettings(settings).addMapping("type", mapping, XContentType.JSON)); - assertAcked(transportClient().admin().indices().prepareCreate(queryIndex) - .setSettings(settings).addMapping("type", mapping, XContentType.JSON)); - ensureGreen(queryIndex, lookupIndex); - requests.clear(); - } - - @After - public void checkAllRequestsContainHeaders() { - assertRequestsContainHeader(IndexRequest.class); - assertRequestsContainHeader(RefreshRequest.class); - } - - public void testThatTermsLookupGetRequestContainsContextAndHeaders() throws Exception { - transportClient().prepareIndex(lookupIndex, "type", "1") - .setSource(jsonBuilder().startObject().array("followers", "foo", "bar", "baz").endObject()).get(); - transportClient().prepareIndex(queryIndex, "type", "1") - .setSource(jsonBuilder().startObject().field("username", "foo").endObject()).get(); - transportClient().admin().indices().prepareRefresh(queryIndex, lookupIndex).get(); - - TermsLookup termsLookup = new TermsLookup(lookupIndex, "type", "1", "followers"); - TermsQueryBuilder termsLookupFilterBuilder = QueryBuilders.termsLookupQuery("username", termsLookup); - BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(termsLookupFilterBuilder); - - SearchResponse searchResponse = transportClient() - .prepareSearch(queryIndex) - .setQuery(queryBuilder) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - - assertGetRequestsContainHeaders(); - } - - - - public void testThatGeoShapeQueryGetRequestContainsContextAndHeaders() throws Exception { - transportClient().prepareIndex(lookupIndex, "type", "1").setSource(jsonBuilder().startObject() - .field("name", "Munich Suburban Area") - .startObject("location") - .field("type", "polygon") - .startArray("coordinates").startArray() - .startArray().value(11.34).value(48.25).endArray() - .startArray().value(11.68).value(48.25).endArray() - .startArray().value(11.65).value(48.06).endArray() - .startArray().value(11.37).value(48.13).endArray() - .startArray().value(11.34).value(48.25).endArray() // close the polygon - .endArray().endArray() - .endObject() - .endObject()) - .get(); - // second document - transportClient().prepareIndex(queryIndex, "type", "1").setSource(jsonBuilder().startObject() - .field("name", "Munich Center") - .startObject("location") - .field("type", "point") - .startArray("coordinates").value(11.57).value(48.13).endArray() - .endObject() - .endObject()) - .get(); - transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get(); - - GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1") - .indexedShapeIndex(lookupIndex) - .indexedShapePath("location"); - - 
SearchResponse searchResponse = transportClient() - .prepareSearch(queryIndex) - .setQuery(queryBuilder) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - assertThat(requests, hasSize(greaterThan(0))); - - assertGetRequestsContainHeaders(); - } - - public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHeaders() throws Exception { - transportClient().prepareIndex(lookupIndex, "type", "1") - .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject()) - .get(); - transportClient().prepareIndex(queryIndex, "type", "1") - .setSource(jsonBuilder().startObject().field("name", "Jar Jar Binks - A horrible mistake").endObject()) - .get(); - transportClient().prepareIndex(queryIndex, "type", "2") - .setSource(jsonBuilder().startObject().field("name", "Star Wars - Return of the jedi").endObject()) - .get(); - transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get(); - - MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = QueryBuilders.moreLikeThisQuery(new String[]{"name"}, null, - new Item[]{new Item(lookupIndex, "type", "1")}) - .minTermFreq(1) - .minDocFreq(1); - - SearchResponse searchResponse = transportClient() - .prepareSearch(queryIndex) - .setQuery(moreLikeThisQueryBuilder) - .get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 1); - - assertRequestsContainHeader(MultiTermVectorsRequest.class); - } - - public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { - final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; - Request request = new Request("GET", "/" + queryIndex + "/_search"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.addHeader(CUSTOM_HEADER, randomHeaderValue); - options.addHeader(IRRELEVANT_HEADER, randomHeaderValue); - request.setOptions(options); - Response response = getRestClient().performRequest(request); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - List searchRequests = getRequests(SearchRequest.class); - assertThat(searchRequests, hasSize(greaterThan(0))); - for (RequestAndHeaders requestAndHeaders : searchRequests) { - assertThat(requestAndHeaders.headers.containsKey(CUSTOM_HEADER), is(true)); - // was not specified, thus is not included - assertThat(requestAndHeaders.headers.containsKey(IRRELEVANT_HEADER), is(false)); - } - } - - private List getRequests(Class clazz) { - List results = new ArrayList<>(); - for (RequestAndHeaders request : requests) { - if (request.request.getClass().equals(clazz)) { - results.add(request); - } - } - - return results; - } - - private void assertRequestsContainHeader(Class clazz) { - List classRequests = getRequests(clazz); - for (RequestAndHeaders request : classRequests) { - assertRequestContainsHeader(request.request, request.headers); - } - } - - private void assertGetRequestsContainHeaders() { - assertGetRequestsContainHeaders(this.lookupIndex); - } - - private void assertGetRequestsContainHeaders(String index) { - List getRequests = getRequests(GetRequest.class); - assertThat(getRequests, hasSize(greaterThan(0))); - - for (RequestAndHeaders request : getRequests) { - if (!((GetRequest)request.request).index().equals(index)) { - continue; - } - assertRequestContainsHeader(request.request, request.headers); - } - } - - private void assertRequestContainsHeader(ActionRequest request, Map context) { - String msg = String.format(Locale.ROOT, "Expected header %s to be in request %s", CUSTOM_HEADER, 
request.getClass().getName()); - if (request instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) request; - msg = String.format(Locale.ROOT, "Expected header %s to be in index request %s/%s/%s", CUSTOM_HEADER, - indexRequest.index(), indexRequest.type(), indexRequest.id()); - } - assertThat(msg, context.containsKey(CUSTOM_HEADER), is(true)); - assertThat(context.get(CUSTOM_HEADER).toString(), is(randomHeaderValue)); - } - - /** - * a transport client that adds our random header - */ - private Client transportClient() { - return internalCluster().transportClient().filterWithHeader(Collections.singletonMap(CUSTOM_HEADER, randomHeaderValue)); - } - - public static class ActionLoggingPlugin extends Plugin implements ActionPlugin { - - private final SetOnce loggingFilter = new SetOnce<>(); - - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, - NamedXContentRegistry xContentRegistry, Environment environment, - NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - loggingFilter.set(new LoggingFilter(threadPool)); - return Collections.emptyList(); - } - - @Override - public List getActionFilters() { - return singletonList(loggingFilter.get()); - } - - } - - public static class LoggingFilter extends ActionFilter.Simple { - - private final ThreadPool threadPool; - - public LoggingFilter(ThreadPool pool) { - this.threadPool = pool; - } - - @Override - public int order() { - return 999; - } - - @Override - protected boolean apply(String action, ActionRequest request, ActionListener listener) { - requests.add(new RequestAndHeaders(threadPool.getThreadContext().getHeaders(), request)); - return true; - } - } - - private static class RequestAndHeaders { - final Map headers; - final ActionRequest request; - - private RequestAndHeaders(Map headers, ActionRequest request) { - this.headers = headers; - this.request = request; - } - } - - public static class CustomHeadersPlugin extends Plugin implements ActionPlugin { - public Collection getRestHeaders() { - return Collections.singleton(CUSTOM_HEADER); - } - } -} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index b40c6f3a1b26f..654cfb5e47129 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -82,19 +82,6 @@ protected Collection> nodePlugins() { return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); - } - - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put(super.transportClientSettings()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, clientTypeKey) - .build(); - } - @Override protected boolean ignoreExternalCluster() { return true; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 179fd82cda020..fc758788e6197 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -105,7 +105,7 @@ *
 * <p/>
    * We need at least 2 nodes so we have a master node a non-master node */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class TasksIT extends ESIntegTestCase { private Map, RecordingTaskManagerListener> listeners = new HashMap<>(); @@ -122,11 +122,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class, TestTaskPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java index 7cfc2ea1f280d..51d3ecc89afc7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java @@ -30,8 +30,7 @@ import java.io.IOException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0, - autoMinMasterNodes = false) +@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, autoMinMasterNodes = false) public class IndicesExistsIT extends ESIntegTestCase { public void testIndexExistsWithBlocksInPlace() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java deleted file mode 100644 index 5149a0837e908..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.transport.CloseableConnection; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.ConnectionProfile; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportMessageListener; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportStats; - -import java.net.UnknownHostException; -import java.util.Collections; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.atomic.AtomicInteger; - -abstract class FailAndRetryMockTransport implements Transport { - - private final Random random; - private final ClusterName clusterName; - private volatile Map requestHandlers = Collections.emptyMap(); - private final Object requestHandlerMutex = new Object(); - private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportMessageListener listener; - - private boolean connectMode = true; - - private final AtomicInteger connectTransportExceptions = new AtomicInteger(); - private final AtomicInteger failures = new AtomicInteger(); - private final AtomicInteger successes = new AtomicInteger(); - private final Set triedNodes = new CopyOnWriteArraySet<>(); - - FailAndRetryMockTransport(Random random, ClusterName clusterName) { - this.random = new Random(random.nextLong()); - this.clusterName = clusterName; - } - - protected abstract ClusterState getMockClusterState(DiscoveryNode node); - - @Override - public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener connectionListener) { - connectionListener.onResponse(new CloseableConnection() { - - @Override - public DiscoveryNode getNode() { - return node; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { - //we make sure that nodes get added to the connected ones when calling addTransportAddress, by returning proper nodes info - if (connectMode) { - if (TransportLivenessAction.NAME.equals(action)) { - TransportResponseHandler 
transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); - transportResponseHandler.handleResponse(new LivenessResponse(clusterName, node)); - } else if (ClusterStateAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - ClusterState clusterState = getMockClusterState(node); - transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, false)); - } else if (TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - Version version = node.getVersion(); - transportResponseHandler.handleResponse(new TransportService.HandshakeResponse(node, clusterName, version)); - - } else { - throw new UnsupportedOperationException("Mock transport does not understand action " + action); - } - return; - } - - //once nodes are connected we'll just return errors for each sendRequest call - triedNodes.add(node); - - if (random.nextInt(100) > 10) { - connectTransportExceptions.incrementAndGet(); - throw new ConnectTransportException(node, "node not available"); - } else { - if (random.nextBoolean()) { - failures.incrementAndGet(); - //throw whatever exception that is not a subclass of ConnectTransportException - throw new IllegalStateException(); - } else { - TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - if (random.nextBoolean()) { - successes.incrementAndGet(); - transportResponseHandler.handleResponse(newResponse()); - } else { - failures.incrementAndGet(); - transportResponseHandler.handleException(new TransportException("transport exception")); - } - } - } - } - }); - - return () -> {}; - } - - protected abstract Response newResponse(); - - public void endConnectMode() { - this.connectMode = false; - } - - public int connectTransportExceptions() { - return connectTransportExceptions.get(); - } - - public int failures() { - return failures.get(); - } - - public int successes() { - return successes.get(); - } - - public Set triedNodes() { - return triedNodes; - } - - - @Override - public BoundTransportAddress boundAddress() { - return null; - } - - @Override - public TransportAddress[] addressesFromString(String address) throws UnknownHostException { - throw new UnsupportedOperationException(); - } - - @Override - public Lifecycle.State lifecycleState() { - return null; - } - - @Override - public void addLifecycleListener(LifecycleListener listener) { - throw new UnsupportedOperationException(); - } - - @Override - public void removeLifecycleListener(LifecycleListener listener) { - throw new UnsupportedOperationException(); - } - - @Override - public void start() {} - - @Override - public void stop() {} - - @Override - public void close() {} - - @Override - public Map profileBoundAddresses() { - return Collections.emptyMap(); - } - - @Override - public TransportStats getStats() { - throw new UnsupportedOperationException(); - } - - @Override - public void registerRequestHandler(RequestHandlerRegistry reg) { - synchronized (requestHandlerMutex) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); - } - requestHandlers = Maps.copyMapWithAddedEntry(requestHandlers, reg.getAction(), reg); 
- } - } - @Override - public ResponseHandlers getResponseHandlers() { - return responseHandlers; - } - - @Override - public RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); - } - - - @Override - public void setMessageListener(TransportMessageListener listener) { - this.listener = listener; - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java b/server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java deleted file mode 100644 index 6fa1848cc84b8..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/NodeDisconnectIT.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.elasticsearch.client.transport.TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL; - -@ClusterScope(scope = Scope.TEST) -public class NodeDisconnectIT extends ESIntegTestCase { - - public void testNotifyOnDisconnect() throws IOException { - internalCluster().ensureAtLeastNumDataNodes(2); - - final Set disconnectedNodes = Collections.synchronizedSet(new HashSet<>()); - try (TransportClient client = new MockTransportClient(Settings.builder() - .put("cluster.name", internalCluster().getClusterName()) - .put(CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.getKey(), "1h") // disable sniffing for better control - .build(), - Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) { - for (TransportService service : internalCluster().getInstances(TransportService.class)) { - client.addTransportAddress(service.boundAddress().publishAddress()); - } - internalCluster().stopRandomDataNode(); - for (int i = 0; i < 20; i++) { // fire up requests such that we hit the node and pass it to the listener - client.admin().cluster().prepareState().get(); - } - assertEquals(1, disconnectedNodes.size()); - } - assertEquals(1, disconnectedNodes.size()); - } - - public void testNotifyOnDisconnectInSniffer() throws IOException { - internalCluster().ensureAtLeastNumDataNodes(2); - - final Set disconnectedNodes = Collections.synchronizedSet(new HashSet<>()); - try (TransportClient client = new 
MockTransportClient(Settings.builder() - .put("cluster.name", internalCluster().getClusterName()).build(), Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) { - int numNodes = 0; - for (TransportService service : internalCluster().getInstances(TransportService.class)) { - numNodes++; - client.addTransportAddress(service.boundAddress().publishAddress()); - } - Set discoveryNodes = client.connectedNodes().stream().map(n -> n.getAddress()).collect(Collectors.toSet()); - assertEquals(numNodes, discoveryNodes.size()); - assertEquals(0, disconnectedNodes.size()); - internalCluster().stopRandomDataNode(); - client.getNodesService().doSample(); - assertEquals(1, disconnectedNodes.size()); - assertTrue(discoveryNodes.contains(disconnectedNodes.stream().findAny().get().getAddress())); - } - assertEquals(1, disconnectedNodes.size()); - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java deleted file mode 100644 index e63f3a1d59a29..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.AbstractClientHeadersTestCase; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportInterceptor; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { - - private MockTransportService transportService; - - @Override - public void tearDown() throws Exception { - try { - // stop this first before we bubble up since - // transportService uses the threadpool that super.tearDown will close - transportService.stop(); - transportService.close(); - } finally { - super.tearDown(); - } - - } - - @Override - protected Client buildClient(Settings headersSettings, Action[] testedActions) { - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - transportService.start(); - transportService.acceptIncomingRequests(); - String transport = getTestTransportType(); - TransportClient client = new MockTransportClient(Settings.builder() - .put("client.transport.sniff", false) - .put("cluster.name", "cluster1") - .put("node.name", "transport_client_" + this.getTestName()) - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), transport) - .put(headersSettings) - .build(), InternalTransportServiceInterceptor.TestPlugin.class); - InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class) - .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get(); - plugin.instance.threadPool = client.threadPool(); - plugin.instance.address = transportService.boundAddress().publishAddress(); - 
client.addTransportAddress(transportService.boundAddress().publishAddress()); - return client; - } - - public void testWithSniffing() throws Exception { - String transport = getTestTransportType(); - try (TransportClient client = new MockTransportClient( - Settings.builder() - .put("client.transport.sniff", true) - .put("cluster.name", "cluster1") - .put("node.name", "transport_client_" + this.getTestName() + "_1") - .put("client.transport.nodes_sampler_interval", "1s") - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), transport) - .put(HEADER_SETTINGS) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), - InternalTransportServiceInterceptor.TestPlugin.class)) { - InternalTransportServiceInterceptor.TestPlugin plugin = client.injector.getInstance(PluginsService.class) - .filterPlugins(InternalTransportServiceInterceptor.TestPlugin.class).stream().findFirst().get(); - plugin.instance.threadPool = client.threadPool(); - plugin.instance.address = transportService.boundAddress().publishAddress(); - client.addTransportAddress(transportService.boundAddress().publishAddress()); - - if (!plugin.instance.clusterStateLatch.await(5, TimeUnit.SECONDS)) { - fail("takes way too long to get the cluster state"); - } - - assertEquals(1, client.connectedNodes().size()); - assertEquals(client.connectedNodes().get(0).getAddress(), transportService.boundAddress().publishAddress()); - } - } - - public static class InternalTransportServiceInterceptor implements TransportInterceptor { - - ThreadPool threadPool; - TransportAddress address; - - - public static class TestPlugin extends Plugin implements NetworkPlugin { - private InternalTransportServiceInterceptor instance = new InternalTransportServiceInterceptor(); - - @Override - public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, - ThreadContext threadContext) { - return Collections.singletonList(new TransportInterceptor() { - @Override - public TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - return instance.interceptHandler(action, executor, forceExecution, actualHandler); - } - - @Override - public AsyncSender interceptSender(AsyncSender sender) { - return instance.interceptSender(sender); - } - }); - } - } - - final CountDownLatch clusterStateLatch = new CountDownLatch(1); - - @Override - public AsyncSender interceptSender(AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, - TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) { - final ClusterName clusterName = new ClusterName("cluster1"); - if (TransportLivenessAction.NAME.equals(action)) { - assertHeaders(threadPool); - ((TransportResponseHandler) handler).handleResponse( - new LivenessResponse(clusterName, connection.getNode())); - } else if (ClusterStateAction.NAME.equals(action)) { - assertHeaders(threadPool); - ClusterName cluster1 = clusterName; - ClusterState.Builder builder = ClusterState.builder(cluster1); - //the sniffer detects only data nodes - builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", "someId", "some_ephemeralId_id", - address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), - Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); - ((TransportResponseHandler) handler) - .handleResponse(new ClusterStateResponse(cluster1, 
builder.build(), false)); - clusterStateLatch.countDown(); - } else if (TransportService.HANDSHAKE_ACTION_NAME .equals(action)) { - ((TransportResponseHandler) handler).handleResponse( - new TransportService.HandshakeResponse(connection.getNode(), clusterName, connection.getNode().getVersion())); - } else { - handler.handleException(new TransportException("", new InternalException(action))); - } - } - }; - } - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java deleted file mode 100644 index dab44b37a3ee9..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.MockNode; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeValidationException; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.MockHttpTransport; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.Arrays; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.startsWith; - -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 1.0) -public class TransportClientIT extends ESIntegTestCase { - - public void testPickingUpChangesInDiscoveryNode() { - String nodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false)); - - TransportClient client = (TransportClient) internalCluster().client(nodeName); - assertThat(client.connectedNodes().get(0).isDataNode(), equalTo(false)); - - } - - public void testNodeVersionIsUpdated() throws IOException, NodeValidationException { - TransportClient client = (TransportClient) internalCluster().client(); - try (Node node = new MockNode(Settings.builder() - .put(internalCluster().getDefaultSettings()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - 
.put("node.name", "testNodeVersionIsUpdated") - .put("transport.type", getTestTransportType()) - .put(Node.NODE_DATA_SETTING.getKey(), false) - .put("cluster.name", "foobar") - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), "testNodeVersionIsUpdated") - .build(), Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class)).start()) { - TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); - client.addTransportAddress(transportAddress); - // since we force transport clients there has to be one node started that we connect to. - assertThat(client.connectedNodes().size(), greaterThanOrEqualTo(1)); - // connected nodes have updated version - for (DiscoveryNode discoveryNode : client.connectedNodes()) { - assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT)); - } - - for (DiscoveryNode discoveryNode : client.listedNodes()) { - assertThat(discoveryNode.getId(), startsWith("#transport#-")); - assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion())); - } - - assertThat(client.filteredNodes().size(), equalTo(1)); - for (DiscoveryNode discoveryNode : client.filteredNodes()) { - assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion())); - } - } - } - - public void testThatTransportClientSettingIsSet() { - TransportClient client = (TransportClient) internalCluster().client(); - Settings settings = client.injector.getInstance(Settings.class); - assertThat(Client.CLIENT_TYPE_SETTING_S.get(settings), is("transport")); - } - - public void testThatTransportClientSettingCannotBeChanged() { - String transport = getTestTransportType(); - Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), transport) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings)) { - Settings settings = client.injector.getInstance(Settings.class); - assertThat(Client.CLIENT_TYPE_SETTING_S.get(settings), is("transport")); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java deleted file mode 100644 index 9e13dbaa89b18..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ /dev/null @@ -1,441 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.Node; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportInterceptor; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.hamcrest.CustomMatcher; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.test.transport.MockTransportService.createNewService; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.everyItem; -import static org.hamcrest.CoreMatchers.hasItem; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.startsWith; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; - -public class TransportClientNodesServiceTests extends ESTestCase { - - private static class TestIteration implements Closeable { - private final ThreadPool threadPool; - private final FailAndRetryMockTransport transport; - private final MockTransportService transportService; - private final TransportClientNodesService transportClientNodesService; - private final int listNodesCount; - private final int sniffNodesCount; - private TransportAddress livenessAddress = buildNewFakeTransportAddress(); - final List listNodeAddresses; - // map for each address of the nodes a cluster state request should respond with - final Map nodeMap; - - TestIteration() { - this(Settings.EMPTY); - 
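        // The no-argument constructor simply defers to the Settings-based constructor
        // that follows; the sniff-oriented tests pass a Settings object with
        // TransportClient.CLIENT_TRANSPORT_SNIFF enabled so that each listed node also
        // advertises extra sniffable nodes, which is what populates sniffNodesCount and
        // the per-address nodeMap entries.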
} - - TestIteration(Settings extraSettings) { - Settings settings = Settings.builder().put(extraSettings).put("cluster.name", "test").build(); - ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - List listNodes = new ArrayList<>(); - Map nodeMap = new HashMap<>(); - this.listNodesCount = randomIntBetween(1, 10); - int sniffNodesCount = 0; - for (int i = 0; i < listNodesCount; i++) { - TransportAddress transportAddress = buildNewFakeTransportAddress(); - listNodes.add(transportAddress); - DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder(); - discoNodes.add(new DiscoveryNode("#list-node#-" + transportAddress, transportAddress, Version.CURRENT)); - - if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings)) { - final int numSniffNodes = randomIntBetween(0, 3); - for (int j = 0; j < numSniffNodes; ++j) { - TransportAddress sniffAddress = buildNewFakeTransportAddress(); - DiscoveryNode sniffNode = new DiscoveryNode("#sniff-node#-" + sniffAddress, sniffAddress, Version.CURRENT); - discoNodes.add(sniffNode); - // also allow sniffing of the sniff node itself - nodeMap.put(sniffAddress, DiscoveryNodes.builder().add(sniffNode).build()); - ++sniffNodesCount; - } - } - nodeMap.put(transportAddress, discoNodes.build()); - } - listNodeAddresses = listNodes; - this.nodeMap = nodeMap; - this.sniffNodesCount = sniffNodesCount; - - threadPool = new TestThreadPool("transport-client-nodes-service-tests"); - transport = new FailAndRetryMockTransport(random(), clusterName) { - @Override - public List getDefaultSeedAddresses() { - return Collections.emptyList(); - } - - @Override - protected TestResponse newResponse() { - return new TestResponse(); - } - - @Override - protected ClusterState getMockClusterState(DiscoveryNode node) { - return ClusterState.builder(clusterName).nodes(TestIteration.this.nodeMap.get(node.getAddress())).build(); - } - }; - - transportService = new MockTransportService(settings, transport, threadPool, new TransportInterceptor() { - @Override - public AsyncSender interceptSender(AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, - TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) { - if (TransportLivenessAction.NAME.equals(action)) { - sender.sendRequest(connection, action, request, options, wrapLivenessResponseHandler(handler, - connection.getNode(), clusterName)); - } else { - sender.sendRequest(connection, action, request, options, handler); - } - } - }; - } - }, (addr) -> { - assert addr == null : "boundAddress: " + addr; - return DiscoveryNode.createLocal(settings, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()); - }, null, Collections.emptySet()); - transportService.addNodeConnectedBehavior((connectionManager, discoveryNode) -> false); - transportService.addGetConnectionBehavior((connectionManager, discoveryNode) -> { - // The FailAndRetryTransport does not use the connection profile - PlainActionFuture future = PlainActionFuture.newFuture(); - transport.openConnection(discoveryNode, null, future); - return future.actionGet(); - }); - transportService.start(); - transportService.acceptIncomingRequests(); - transportClientNodesService = - new TransportClientNodesService(settings, transportService, threadPool, (a, b) -> {}); - transportClientNodesService.addTransportAddresses(listNodeAddresses.toArray(new TransportAddress[0])); - } - - private TransportResponseHandler 
wrapLivenessResponseHandler(TransportResponseHandler handler, - DiscoveryNode node, - ClusterName clusterName) { - return new TransportResponseHandler() { - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } - - @Override - @SuppressWarnings("unchecked") - public void handleResponse(T response) { - LivenessResponse livenessResponse = new LivenessResponse(clusterName, - new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), "liveness-hostname" + node.getId(), - "liveness-hostaddress" + node.getId(), - livenessAddress, node.getAttributes(), node.getRoles(), - node.getVersion())); - handler.handleResponse((T)livenessResponse); - } - - @Override - public void handleException(TransportException exp) { - handler.handleException(exp); - } - - @Override - public String executor() { - return handler.executor(); - } - }; - } - - @Override - public void close() { - transport.endConnectMode(); - transportService.stop(); - transportClientNodesService.close(); - terminate(threadPool); - } - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37567") - public void testListenerFailures() throws InterruptedException { - int iters = iterations(10, 100); - for (int i = 0; i < iters; i++) { - try (final TestIteration iteration = new TestIteration()) { - final CountDownLatch latch = new CountDownLatch(1); - final AtomicInteger finalFailures = new AtomicInteger(); - final AtomicReference<Throwable> finalFailure = new AtomicReference<>(); - final AtomicReference<TestResponse> response = new AtomicReference<>(); - ActionListener<TestResponse> actionListener = new ActionListener<TestResponse>() { - @Override - public void onResponse(TestResponse testResponse) { - response.set(testResponse); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - finalFailures.incrementAndGet(); - finalFailure.set(e); - latch.countDown(); - } - }; - - final AtomicInteger preSendFailures = new AtomicInteger(); - - iteration.transportClientNodesService.execute((node, retryListener) -> { - if (rarely()) { - preSendFailures.incrementAndGet(); - //throw whatever exception that is not a subclass of ConnectTransportException - throw new IllegalArgumentException(); - } - - iteration.transportService.sendRequest(node, "action", new TestRequest(), - TransportRequestOptions.EMPTY, new TransportResponseHandler<TestResponse>() { - @Override - public TestResponse read(StreamInput in) { - return new TestResponse(in); - } - - @Override - public void handleResponse(TestResponse response1) { - retryListener.onResponse(response1); - } - - @Override - public void handleException(TransportException exp) { - retryListener.onFailure(exp); - } - - @Override - public String executor() { - return randomBoolean() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; - } - }); - }, actionListener); - - latch.await(); - - //there can be only either one failure that causes the request to fail straightaway or success - assertThat(preSendFailures.get() + iteration.transport.failures() + iteration.transport.successes(), lessThanOrEqualTo(1)); - - if (iteration.transport.successes() == 1) { - assertThat(finalFailures.get(), equalTo(0)); - assertThat(finalFailure.get(), nullValue()); - assertThat(response.get(), notNullValue()); - } else { - assertThat(finalFailures.get(), equalTo(1)); - assertThat(finalFailure.get(), notNullValue()); - assertThat(response.get(), nullValue()); - if (preSendFailures.get() == 0 && iteration.transport.failures() == 0) { - assertThat(finalFailure.get(), instanceOf(NoNodeAvailableException.class)); - } - } - - assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.listNodesCount)); - assertThat(iteration.transport.triedNodes().size(), equalTo(iteration.transport.connectTransportExceptions() + - iteration.transport.failures() + iteration.transport.successes())); - } - } - } - - public void testConnectedNodes() { - int iters = iterations(10, 100); - for (int i = 0; i < iters; i++) { - try (final TestIteration iteration = new TestIteration()) { - assertThat(iteration.transportClientNodesService.connectedNodes().size(), lessThanOrEqualTo(iteration.listNodesCount)); - for (DiscoveryNode discoveryNode : iteration.transportClientNodesService.connectedNodes()) { - assertThat(iteration.listNodeAddresses, hasItem(discoveryNode.getAddress())); - } - } - } - } - - public void testRemoveAddress() { - try (final TestIteration iteration = new TestIteration()) { - final TransportClientNodesService service = iteration.transportClientNodesService; - final TransportAddress addressToRemove = randomFrom(iteration.listNodeAddresses); - service.removeTransportAddress(addressToRemove); - assertThat(service.connectedNodes(), everyItem(not(new CustomMatcher<DiscoveryNode>("removed address") { - @Override - public boolean matches(Object item) { - return item instanceof DiscoveryNode && ((DiscoveryNode)item).getAddress().equals(addressToRemove); - } - }))); - assertEquals(iteration.listNodesCount + iteration.sniffNodesCount - 1, service.connectedNodes().size()); - } - } - - public void testSniffNodesSamplerClosesConnections() throws Exception { - final TestThreadPool threadPool = new TestThreadPool("testSniffNodesSamplerClosesConnections"); - - Settings remoteSettings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "remote").build(); - try (MockTransportService remoteService = createNewService(remoteSettings, Version.CURRENT, threadPool, null)) { - final MockHandler handler = new MockHandler(remoteService); - remoteService.registerRequestHandler(ClusterStateAction.NAME, ThreadPool.Names.SAME, ClusterStateRequest::new, handler); - remoteService.start(); - remoteService.acceptIncomingRequests(); - - Settings clientSettings = Settings.builder() - .put(TransportClient.CLIENT_TRANSPORT_SNIFF.getKey(), true) - .put(TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.getKey(), TimeValue.timeValueSeconds(1)) - .put(TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.getKey(), TimeValue.timeValueSeconds(30)) - .build(); - - try (MockTransportService clientService = createNewService(clientSettings, Version.CURRENT, threadPool, null)) { - final List<Transport.Connection> establishedConnections = new CopyOnWriteArrayList<>(); - - clientService.addConnectBehavior(remoteService, (transport, discoveryNode, profile, listener) -> - transport.openConnection(discoveryNode, profile, - ActionListener.delegateFailure(listener, (delegatedListener, connection) -> { - establishedConnections.add(connection); - delegatedListener.onResponse(connection); - }))); - - clientService.start(); - clientService.acceptIncomingRequests(); - - try (TransportClientNodesService transportClientNodesService = - new TransportClientNodesService(clientSettings, clientService, threadPool, (a, b) -> {})) { - assertEquals(0, transportClientNodesService.connectedNodes().size()); - assertEquals(0, establishedConnections.size()); - - transportClientNodesService.addTransportAddresses(remoteService.getLocalDiscoNode().getAddress()); - assertEquals(1, transportClientNodesService.connectedNodes().size()); - assertEquals(1, 
clientService.connectionManager().size()); - - transportClientNodesService.doSample(); - assertEquals(1, clientService.connectionManager().size()); - - establishedConnections.clear(); - handler.failToRespond(); - Thread thread = new Thread(transportClientNodesService::doSample); - thread.start(); - - assertBusy(() -> assertTrue(establishedConnections.size() >= 1)); - assertFalse("Temporary ping connection must be opened", establishedConnections.get(0).isClosed()); - - thread.join(); - - assertTrue(establishedConnections.get(0).isClosed()); - } - } - } finally { - terminate(threadPool); - } - } - - class MockHandler implements TransportRequestHandler { - - private final AtomicBoolean failToRespond = new AtomicBoolean(false); - private final MockTransportService transportService; - - MockHandler(MockTransportService transportService) { - this.transportService = transportService; - } - - @Override - public void messageReceived(ClusterStateRequest request, TransportChannel channel, Task task) throws Exception { - if (failToRespond.get()) { - return; - } - - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(transportService.getLocalDiscoNode()).build(); - ClusterState build = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodes).build(); - channel.sendResponse(new ClusterStateResponse(ClusterName.DEFAULT, build, false)); - } - - void failToRespond() { - if (failToRespond.compareAndSet(false, true) == false) { - throw new AssertionError("Request handler is already marked as failToRespond"); - } - } - } - - public static class TestRequest extends TransportRequest { - - } - - private static class TestResponse extends TransportResponse { - - private TestResponse() {} - private TestResponse(StreamInput in) {} - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java deleted file mode 100644 index 8444b3bd1374f..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientRetryIT.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -@ClusterScope(scope = Scope.TEST, numClientNodes = 0, supportsDedicatedMasters = false) -public class TransportClientRetryIT extends ESIntegTestCase { - public void testRetry() throws IOException, ExecutionException, InterruptedException { - Iterable instances = internalCluster().getInstances(TransportService.class); - TransportAddress[] addresses = new TransportAddress[internalCluster().size()]; - int i = 0; - for (TransportService instance : instances) { - addresses[i++] = instance.boundAddress().publishAddress(); - } - - String transport = getTestTransportType(); - - Settings.Builder builder = Settings.builder().put("client.transport.nodes_sampler_interval", "1s") - .put("node.name", "transport_client_retry_test") - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), internalCluster().getClusterName()) - .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(),transport) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()); - - try (TransportClient client = new MockTransportClient(builder.build())) { - client.addTransportAddresses(addresses); - assertEquals(client.connectedNodes().size(), internalCluster().size()); - - int size = cluster().size(); - //kill all nodes one by one, leaving a single master/data node at the end of the loop - for (int j = 1; j < size; j++) { - internalCluster().stopRandomNode(input -> true); - - ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest().local(true); - ClusterState clusterState; - //use both variants of execute method: with and without listener - if (randomBoolean()) { - clusterState = client.admin().cluster().state(clusterStateRequest).get().getState(); - } else { - PlainActionFuture future = PlainActionFuture.newFuture(); - client.admin().cluster().state(clusterStateRequest, future); - clusterState = future.get().getState(); - } - assertThat(clusterState.nodes().getSize(), greaterThanOrEqualTo(size - j)); - assertThat(client.connectedNodes().size(), greaterThanOrEqualTo(size - j)); - } - } - } -} diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java deleted file mode 100644 index 03ac1ebc3b67b..0000000000000 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.TransportSettings; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.hasItem; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.object.HasToString.hasToString; - -public class TransportClientTests extends ESTestCase { - - public void testThatUsingAClosedClientThrowsAnException() throws ExecutionException, InterruptedException { - final TransportClient client = new MockTransportClient(Settings.EMPTY); - client.close(); - final IllegalStateException e = - expectThrows(IllegalStateException.class, () -> client.admin().cluster().health(new ClusterHealthRequest()).get()); - assertThat(e, hasToString(containsString("transport client is closed"))); - } - - /** - * test that when plugins are provided that want to register - * {@link NamedWriteable}, those are also made known to the - * {@link NamedWriteableRegistry} of the transport client - */ - public void testPluginNamedWriteablesRegistered() { - Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - assertNotNull(client.namedWriteableRegistry.getReader(MockPlugin.MockNamedWriteable.class, MockPlugin.MockNamedWriteable.NAME)); - } - } - - public void testSettingsContainsTransportClient() { - final Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - final Settings settings = TransportSettings.DEFAULT_FEATURES_SETTING.get(client.settings()); - assertThat(settings.keySet(), hasItem("transport_client")); - assertThat(settings.get("transport_client"), equalTo("true")); - } - } - - public void testDefaultHeader() { - final Settings baseSettings = 
Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - final ThreadContext threadContext = client.threadPool().getThreadContext(); - assertEquals("true", threadContext.getHeader("test")); - } - } - - public static class MockPlugin extends Plugin { - - @Override - public List getNamedWriteables() { - return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)}); - } - - @Override - public Settings additionalSettings() { - return Settings.builder().put(ThreadContext.PREFIX + "." + "test", true).build(); - } - - public class MockNamedWriteable implements NamedWriteable { - - static final String NAME = "mockNamedWritable"; - - MockNamedWriteable(StreamInput in) { - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - } - - @Override - public String getWriteableName() { - return NAME; - } - - } - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java deleted file mode 100644 index fc917d60deede..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexGraveyard; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf; - -/** - * This test suite sets up a situation where the cluster has two plugins installed (node, and node-and-transport-client), and a transport - * client only has node-and-transport-client plugin installed. Each of these plugins inject customs into the cluster state and we want to - * check that the client can de-serialize a cluster state response based on the fact that the response should not contain customs that the - * transport client does not understand based on the fact that it only presents the node-and-transport-client-feature. 
- */ -@ESIntegTestCase.ClusterScope(scope = TEST) -public class ClusterStateIT extends ESIntegTestCase { - - public abstract static - class Custom implements MetaData.Custom { - - private static final ParseField VALUE = new ParseField("value"); - - private final int value; - - int value() { - return value; - } - - Custom(final int value) { - this.value = value; - } - - Custom(final StreamInput in) throws IOException { - value = in.readInt(); - } - - @Override - public EnumSet context() { - return MetaData.ALL_CONTEXTS; - } - - @Override - public Diff diff(final MetaData.Custom previousState) { - return null; - } - - @Override - public void writeTo(final StreamOutput out) throws IOException { - out.writeInt(value); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(VALUE.getPreferredName(), value); - return builder; - } - - } - - public static class NodeCustom extends Custom { - - public static final String TYPE = "node"; - - NodeCustom(final int value) { - super(value); - } - - NodeCustom(final StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public Optional getRequiredFeature() { - return Optional.of("node"); - } - - } - - public static class NodeAndTransportClientCustom extends Custom { - - public static final String TYPE = "node-and-transport-client"; - - NodeAndTransportClientCustom(final int value) { - super(value); - } - - public NodeAndTransportClientCustom(final StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - /* - * This custom should always be returned yet we randomize whether it has a required feature that the client is expected to have - * versus not requiring any feature. We use a field to make the random choice exactly once. - */ - @SuppressWarnings("OptionalUsedAsFieldOrParameterType") - private final Optional requiredFeature = randomBoolean() ? 
Optional.empty() : Optional.of("node-and-transport-client"); - - @Override - public Optional getRequiredFeature() { - return requiredFeature; - } - - } - - public abstract static class CustomPlugin extends Plugin { - - private final List namedWritables = new ArrayList<>(); - private final List namedXContents = new ArrayList<>(); - - public CustomPlugin() { - registerBuiltinWritables(); - } - - protected void registerMetaDataCustom( - final String name, final Writeable.Reader reader, final CheckedFunction parser) { - namedWritables.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, name, reader)); - namedXContents.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(name), parser)); - } - - protected abstract void registerBuiltinWritables(); - - protected abstract String getType(); - - protected abstract Custom getInstance(); - - @Override - public List getNamedWriteables() { - return namedWritables; - } - - @Override - public List getNamedXContent() { - return namedXContents; - } - - private final AtomicBoolean installed = new AtomicBoolean(); - - @Override - public Collection createComponents( - final Client client, - final ClusterService clusterService, - final ThreadPool threadPool, - final ResourceWatcherService resourceWatcherService, - final ScriptService scriptService, - final NamedXContentRegistry xContentRegistry, - final Environment environment, - final NodeEnvironment nodeEnvironment, - final NamedWriteableRegistry namedWriteableRegistry) { - clusterService.addListener(event -> { - final ClusterState state = event.state(); - if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) { - return; - } - - final MetaData metaData = state.metaData(); - if (state.nodes().isLocalNodeElectedMaster()) { - if (metaData.custom(getType()) == null) { - if (installed.compareAndSet(false, true)) { - clusterService.submitStateUpdateTask("install-metadata-custom", new ClusterStateUpdateTask(Priority.URGENT) { - - @Override - public ClusterState execute(ClusterState currentState) { - if (currentState.custom(getType()) == null) { - final MetaData.Builder builder = MetaData.builder(currentState.metaData()); - builder.putCustom(getType(), getInstance()); - return ClusterState.builder(currentState).metaData(builder).build(); - } else { - return currentState; - } - } - - @Override - public void onFailure(String source, Exception e) { - throw new AssertionError(e); - } - - }); - } - } - } - - }); - return Collections.emptyList(); - } - } - - public static class NodePlugin extends CustomPlugin { - - public Optional getFeature() { - return Optional.of("node"); - } - - static final int VALUE = randomInt(); - - @Override - protected void registerBuiltinWritables() { - registerMetaDataCustom( - NodeCustom.TYPE, - NodeCustom::new, - parser -> { - throw new IOException(new UnsupportedOperationException()); - }); - } - - @Override - protected String getType() { - return NodeCustom.TYPE; - } - - @Override - protected Custom getInstance() { - return new NodeCustom(VALUE); - } - - } - - public static class NodeAndTransportClientPlugin extends CustomPlugin { - - @Override - protected Optional getFeature() { - return Optional.of("node-and-transport-client"); - } - - static final int VALUE = randomInt(); - - @Override - protected void registerBuiltinWritables() { - registerMetaDataCustom( - NodeAndTransportClientCustom.TYPE, - NodeAndTransportClientCustom::new, - parser -> { - throw new IOException(new UnsupportedOperationException()); - }); - } - - @Override - protected String 
getType() { - return NodeAndTransportClientCustom.TYPE; - } - - @Override - protected Custom getInstance() { - return new NodeAndTransportClientCustom(VALUE); - } - - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(NodePlugin.class, NodeAndTransportClientPlugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(NodeAndTransportClientPlugin.class); - } - - public void testOptionalCustoms() throws Exception { - // ensure that the customs are injected into the cluster state - assertBusy(() -> assertTrue(clusterService().state().metaData().customs().containsKey(NodeCustom.TYPE))); - assertBusy(() -> assertTrue(clusterService().state().metaData().customs().containsKey(NodeAndTransportClientCustom.TYPE))); - final ClusterStateResponse state = internalCluster().transportClient().admin().cluster().prepareState().get(); - final ImmutableOpenMap customs = state.getState().metaData().customs(); - final Set keys = new HashSet<>(Arrays.asList(customs.keys().toArray(String.class))); - assertThat(keys, hasItem(IndexGraveyard.TYPE)); - assertThat(keys, not(hasItem(NodeCustom.TYPE))); - assertThat(keys, hasItem(NodeAndTransportClientCustom.TYPE)); - final MetaData.Custom actual = customs.get(NodeAndTransportClientCustom.TYPE); - assertThat(actual, instanceOf(NodeAndTransportClientCustom.class)); - assertThat(((NodeAndTransportClientCustom)actual).value(), equalTo(NodeAndTransportClientPlugin.VALUE)); - } - -} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index c31df3ade71cc..27036680880b2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -67,7 +67,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) @TestLogging("_root:DEBUG") public class RareClusterStateIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java index 2ad951312fa14..6ba738714ee8f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java @@ -35,7 +35,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClusterDisruptionCleanSettingsIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 5bc5efc96c661..3215325e835bf 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ 
-85,7 +85,7 @@ * Tests various cluster operations (e.g., indexing) during disruptions. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClusterDisruptionIT extends AbstractDisruptionTestCase { private enum ConflictMode { diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 923af536cbb58..29a75ea744cd8 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -48,7 +48,7 @@ * Tests for discovery during disruptions. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase { /** diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index aea0c8c5c25f9..37d8efd72c72f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -51,7 +51,7 @@ * Tests relating to the loss of the master. */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { /** diff --git a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index b9ac5f33dd911..c8bb9e33c6eac 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -55,7 +55,7 @@ * Tests snapshot operations during disruptions. */ @TestLogging("org.elasticsearch.snapshot:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SnapshotDisruptionIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index c4655bcf7ce9a..1bb93123309eb 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -66,7 +66,7 @@ * not detect a master failure too quickly. 
*/ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class StableMasterDisruptionIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index e0177af1bed39..1b90ea691c1cd 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -100,7 +100,7 @@ public Path nodeConfigPath(int nodeOrdinal) { "other", Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity())) { - other.beforeTest(random(), 0); + other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); final ClusterState second = other.getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); @@ -175,7 +175,7 @@ public Path nodeConfigPath(int nodeOrdinal) { Logger clusterLogger = LogManager.getLogger(JoinHelper.class); Loggers.addAppender(clusterLogger, mockAppender); try { - other.beforeTest(random(), 0); + other.beforeTest(random()); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); assertThat(first.nodes().getSize(), equalTo(1)); assertBusy(() -> mockAppender.assertAllExpectationsMatched()); diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index f1ebcd971741e..b23f709d7350d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -37,11 +37,6 @@ protected Collection> nodePlugins() { return Arrays.asList(DummyQueryParserPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(DummyQueryParserPlugin.class); - } - @Override @Before public void setUp() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 91e73e53ebc15..409e007790ec4 100644 --- a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.greaterThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2, - supportsDedicatedMasters = false, numClientNodes = 1, transportClientRatio = 0.0) + supportsDedicatedMasters = false, numClientNodes = 1) public class ExceptionRetryIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java index 1d11fbc79fc71..f7935021c09f2 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalSettingsIT.java @@ -38,11 +38,6 @@ protected Collection> nodePlugins() { return 
Collections.singleton(InternalOrPrivateSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); - } - public void testSetInternalIndexSettingOnCreate() { final Settings settings = Settings.builder().put("index.internal", "internal").build(); createIndex("index", settings); diff --git a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java index 08f45eac5be64..c8f0740bc35aa 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java @@ -41,11 +41,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(InternalOrPrivateSettingsPlugin.class); - } - public void testSetPrivateIndexSettingOnCreate() { final Settings settings = Settings.builder().put("index.private", "private").build(); final Exception e = expectThrows(Exception.class, () -> createIndex("index", settings)); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index 195e14f65c004..63ff7f9f97463 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -44,11 +44,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - protected boolean ignoreExternalCluster() { return true; } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 4acb391d9c0ee..95e01c79e19d4 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -59,11 +59,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - protected boolean ignoreExternalCluster() { return true; } diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index 2ea6567c9f8d0..8c6b82d5d4128 100644 --- a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -48,11 +48,6 @@ protected Collection> nodePlugins() { return singletonList(TestPersistentTasksPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected boolean ignoreExternalCluster() { return true; diff --git a/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java index 2c32d6eb33ba8..0506d18ffecbd 
100644 --- a/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -38,7 +38,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class FullRollingRestartIT extends ESIntegTestCase { protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) { ClusterHealthResponse clusterHealth = requestBuilder.get(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index d21519fa96754..81ea71621ae3e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -94,11 +94,6 @@ protected Collection> nodePlugins() { return Arrays.asList(CustomSignificanceHeuristicPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(CustomSignificanceHeuristicPlugin.class); - } - public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index e6f7ecf6b37e7..f2239d80d7f62 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -64,11 +64,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(FetchTermVectorsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @SuppressWarnings("unchecked") public void testPlugin() throws Exception { client().admin() diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index cb67481d8ddd0..bfbf04a7f5ad8 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -59,11 +59,6 @@ protected Collection> nodePlugins() { return Arrays.asList(CustomDistanceScorePlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(CustomDistanceScorePlugin.class); - } - public void testPlugin() throws Exception { client().admin() .indices() diff --git a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java index e0ae78dff3466..7ac48e03be688 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -434,9 +434,9 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { assertThat(clearResponse.status(), equalTo(RestStatus.OK)); 
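The plugin-test hunks above all make the same mechanical change: the transportClientPlugins() override is deleted and only the node-side registration remains. A sketch of the shape each of these tests keeps (MyTestPlugin stands in for the plugin each test actually registers):

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Collections.singletonList(MyTestPlugin.class);
    }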
assertToXContentResponse(clearResponse, true, clearResponse.getNumFreed()); - assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse1.getScrollId()) + assertThrows(internalCluster().client().prepareSearchScroll(searchResponse1.getScrollId()) .setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); - assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()) + assertThrows(internalCluster().client().prepareSearchScroll(searchResponse2.getScrollId()) .setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND); } @@ -484,7 +484,7 @@ public void testThatNonExistingScrollIdReturnsCorrectException() throws Exceptio ClearScrollResponse clearScrollResponse = client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); assertThat(clearScrollResponse.isSucceeded(), is(true)); - assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND); + assertThrows(internalCluster().client().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND); } public void testStringSortMissingAscTerminates() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 3345fbd3f248e..4a28b1eeea440 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -114,7 +114,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { public static class TestCustomMetaDataPlugin extends Plugin { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index 777918a7d5eba..20dd3693f78cd 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.hasSize; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 5f286a5ff0ad4..e5febc2bc36ec 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.hamcrest.RegexMatcher; import java.lang.management.ManagementFactory; @@ -93,7 +92,7 @@ public void testThreadNames() throws Exception { || 
threadName.contains("Keep-Alive-Timer")) { continue; } - String nodePrefix = "(" + Pattern.quote(InternalTestCluster.TRANSPORT_CLIENT_PREFIX) + ")?(" + + String nodePrefix = "(" + Pattern.quote(ESIntegTestCase.SUITE_CLUSTER_NODE_PREFIX) + "|" + Pattern.quote(ESIntegTestCase.TEST_CLUSTER_NODE_PREFIX) +")"; assertThat(threadName, RegexMatcher.matches("\\[" + nodePrefix + "\\d+\\]")); diff --git a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index 416f26e81972a..a6b3865de247b 100644 --- a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -118,8 +118,7 @@ * stale or dirty, i.e., come from a stale primary or belong to a write that ends up being discarded. * */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 4, maxNumDataNodes = 6, - transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 4, maxNumDataNodes = 6) @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 94a8e9b7728ce..0ab0afb6b0e7a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -62,7 +62,6 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -245,8 +244,6 @@ *
 * <p>
 * This class supports the following system properties (passed with -Dkey=value to the application)
 * <ul>
- * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ration between node
- * and transport clients used</li>
 * <li>-D{@value #TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
 * useful to test the system without asserting modules that to make sure they don't hide any bugs in production.</li>
 * <li> - a random seed used to initialize the index random context.
@@ -279,11 +276,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
     public static final String SUITE_CLUSTER_NODE_PREFIX = "node_s";
     public static final String TEST_CLUSTER_NODE_PREFIX = "node_t";
 
-    /**
-     * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
-     */
-    public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
-
     /**
      * Key used to eventually switch to using an external cluster and provide its transport addresses
      */
@@ -349,8 +341,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
     private static TestCluster currentCluster;
     private static RestClient restClient = null;
 
-    private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
-
     private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>();
     private static ESIntegTestCase INSTANCE = null; // see @SuiteScope
@@ -372,7 +362,7 @@ protected final boolean enableWarningsCheck() {
     protected final void beforeInternal() throws Exception {
         final Scope currentClusterScope = getCurrentClusterScope();
         Callable<Void> setup = () -> {
-            cluster().beforeTest(random(), getPerTestTransportClientRatio());
+            cluster().beforeTest(random());
             cluster().wipe(excludeTemplates());
             randomIndexTemplate();
             return null;
@@ -1128,28 +1118,13 @@ protected void ensureClusterStateConsistency() throws IOException {
                 && masterId.equals(localClusterState.nodes().getMasterNodeId())) {
                 try {
                     assertEquals("cluster state UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID());
-                    /*
-                     * The cluster state received by the transport client can miss customs that the client does not understand. This
-                     * means that we only expect equality in the cluster state including customs if the master client and the local
-                     * client are of the same type (both or neither are transport clients). Otherwise, we can only assert equality
-                     * modulo non-core customs.
- */ - if (isTransportClient(masterClient) == isTransportClient(client)) { - // We cannot compare serialization bytes since serialization order of maps is not guaranteed - // but we can compare serialization sizes - they should be the same - assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); - // Compare JSON serialization - assertNull( - "cluster state JSON serialization does not match", - differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); - } else { - // remove non-core customs and compare the cluster states - assertNull( - "cluster state JSON serialization does not match (after removing some customs)", - differenceBetweenMapsIgnoringArrayOrder( - convertToMap(removePluginCustoms(masterClusterState)), - convertToMap(removePluginCustoms(localClusterState)))); - } + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); + // Compare JSON serialization + assertNull( + "cluster state JSON serialization does not match", + differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); } catch (final AssertionError error) { logger.error( "Cluster state from master:\n{}\nLocal cluster state:\n{}", @@ -1163,21 +1138,6 @@ protected void ensureClusterStateConsistency() throws IOException { } - /** - * Tests if the client is a transport client or wraps a transport client. - * - * @param client the client to test - * @return true if the client is a transport client or a wrapped transport client - */ - private boolean isTransportClient(final Client client) { - if (TransportClient.class.isAssignableFrom(client.getClass())) { - return true; - } else if (client instanceof RandomizingClient) { - return isTransportClient(((RandomizingClient) client).in()); - } - return false; - } - private static final Set SAFE_METADATA_CUSTOMS = Set.of(IndexGraveyard.TYPE, IngestMetadata.TYPE, RepositoriesMetaData.TYPE, ScriptMetaData.TYPE); @@ -1382,8 +1342,7 @@ protected final void disableAllocation(String... indices) { } /** - * Returns a random admin client. This client can either be a node or a transport client pointing to any of - * the nodes in the cluster. + * Returns a random admin client. This client can be pointing to any of the nodes in the cluster. */ protected AdminClient admin() { return client().admin(); @@ -1658,12 +1617,6 @@ public enum Scope { * negative value means that the number of client nodes will be randomized. */ int numClientNodes() default InternalTestCluster.DEFAULT_NUM_CLIENT_NODES; - - /** - * Returns the transport client ratio. By default this returns -1 which means a random - * ratio in the interval [0..1] is used. - */ - double transportClientRatio() default -1; } private class LatchedActionListener implements ActionListener { @@ -1817,23 +1770,6 @@ protected Collection> nodePlugins() { return Collections.emptyList(); } - /** - * Returns a collection of plugins that should be loaded when creating a transport client. - */ - protected Collection> transportClientPlugins() { - return Collections.emptyList(); - } - - /** - * This method is used to obtain additional settings for clients created by the internal cluster. - * These settings will be applied on the client in addition to some randomized settings defined in - * the cluster. 
These settings will also override any other settings the internal cluster might - * add by default. - */ - protected Settings transportClientSettings() { - return Settings.EMPTY; - } - private ExternalTestCluster buildExternalCluster(String clusterAddresses, String clusterName) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; @@ -1932,22 +1868,6 @@ public Path nodeConfigPath(int nodeOrdinal) { public Collection> nodePlugins() { return ESIntegTestCase.this.nodePlugins(); } - - @Override - public Settings transportClientSettings() { - return Settings.builder().put(initialTransportClientSettings.build()) - .put(ESIntegTestCase.this.transportClientSettings()).build(); - } - - @Override - public Collection> transportClientPlugins() { - Collection> plugins = ESIntegTestCase.this.transportClientPlugins(); - if (plugins.contains(getTestTransportPlugin()) == false) { - plugins = new ArrayList<>(plugins); - plugins.add(getTestTransportPlugin()); - } - return Collections.unmodifiableCollection(plugins); - } }; } @@ -2035,35 +1955,6 @@ public TransportRequestHandler interceptHandler( } } - /** - * Returns the client ratio configured via - */ - private static double transportClientRatio() { - String property = System.getProperty(TESTS_CLIENT_RATIO); - if (property == null || property.isEmpty()) { - return Double.NaN; - } - return Double.parseDouble(property); - } - - /** - * Returns the transport client ratio from the class level annotation or via - * {@link System#getProperty(String)} if available. If both are not available this will - * return a random ratio in the interval {@code [0..1]}. - */ - protected double getPerTestTransportClientRatio() { - final ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); - double perTestRatio = -1; - if (annotation != null) { - perTestRatio = annotation.transportClientRatio(); - } - if (perTestRatio == -1) { - return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? 
randomDouble() : TRANSPORT_CLIENT_RATIO; - } - assert perTestRatio >= 0.0 && perTestRatio <= 1.0; - return perTestRatio; - } - /** * Returns path to a random directory that can be used to create a temporary file system repo */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 3b4f8c8f55d4c..695564690c4b1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -38,7 +38,6 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; @@ -61,12 +60,10 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -105,7 +102,6 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -153,7 +149,6 @@ import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.awaitBusy; -import static org.elasticsearch.test.ESTestCase.getTestTransportType; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -170,7 +165,7 @@ * The cluster supports randomized configuration such that nodes started in the cluster will * automatically load asserting services tracking resources like file handles or open searchers. *
 * <p>
      - * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and + * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random)} and * {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates * to the async nature of Elasticsearch in combination with randomized testing. Once Threads and asynchronous calls * are involved reproducibility is very limited. This class should only be used through {@link ESIntegTestCase}. @@ -720,7 +715,7 @@ private static String getRoleSuffix(Settings settings) { public synchronized Client client() { ensureOpen(); /* Randomly return a client to one of the nodes in the cluster */ - return getOrBuildRandomNode().client(random); + return getOrBuildRandomNode().client(); } /** @@ -729,7 +724,7 @@ public synchronized Client client() { */ public Client dataNodeClient() { /* Randomly return a client to one of the nodes in the cluster */ - return getRandomNodeAndClient(DATA_NODE_PREDICATE).client(random); + return getRandomNodeAndClient(DATA_NODE_PREDICATE).client(); } /** @@ -762,12 +757,12 @@ public synchronized Client coordOnlyNodeClient() { ensureOpen(); NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE); if (randomNodeAndClient != null) { - return randomNodeAndClient.client(random); + return randomNodeAndClient.client(); } int nodeId = nextNodeId.getAndIncrement(); Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY); startCoordinatingOnlyNode(settings); - return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client(random); + return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client(); } public synchronized String startCoordinatingOnlyNode(Settings settings) { @@ -777,26 +772,17 @@ public synchronized String startCoordinatingOnlyNode(Settings settings) { return startNode(builder); } - /** - * Returns a transport client - */ - public synchronized Client transportClient() { - // randomly return a transport client going to one of the nodes in the cluster - return getOrBuildRandomNode().transportClient(); - } - /** * Returns a node client to a given node. */ public Client client(String nodeName) { NodeAndClient nodeAndClient = nodes.get(nodeName); if (nodeAndClient != null) { - return nodeAndClient.client(random); + return nodeAndClient.client(); } throw new AssertionError("No node found with name: [" + nodeName + "]"); } - /** * Returns a "smart" node client to a random node in the cluster */ @@ -830,7 +816,6 @@ private final class NodeAndClient implements Closeable { private MockNode node; private final Settings originalNodeSettings; private Client nodeClient; - private Client transportClient; private final AtomicBoolean closed = new AtomicBoolean(false); private final String name; private final int nodeAndClientId; @@ -862,18 +847,11 @@ public boolean isMasterEligible() { return Node.NODE_MASTER_SETTING.get(node.settings()); } - Client client(Random random) { - double nextDouble = random.nextDouble(); - if (nextDouble < transportClientRatio) { - if (logger.isTraceEnabled()) { - logger.trace("Using transport client for node [{}] sniff: [{}]", node.settings().get("node.name"), false); - } - return getOrBuildTransportClient(); - } else { - return getOrBuildNodeClient(); - } + Client client() { + return getOrBuildNodeClient(); } + // TODO: collapse these together? 
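// A minimal sketch, reconstructed from the removed lines above, of what client(Random)
// used to do: each call rolled against transportClientRatio to pick the client flavour.
//
//     Client client(Random random) {
//         return random.nextDouble() < transportClientRatio
//             ? getOrBuildTransportClient()  // removed wire-level path
//             : getOrBuildNodeClient();      // the only path that remains
//     }
//
// With the ratio gone, client() always returns the node client, which is why the TODO
// above suggests collapsing client() and nodeClient() into a single accessor.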
Client nodeClient() { if (closed.get()) { throw new RuntimeException("already closed"); @@ -881,13 +859,6 @@ Client nodeClient() { return getOrBuildNodeClient(); } - Client transportClient() { - if (closed.get()) { - throw new RuntimeException("already closed"); - } - return getOrBuildTransportClient(); - } - private Client getOrBuildNodeClient() { synchronized (InternalTestCluster.this) { if (closed.get()) { @@ -900,28 +871,10 @@ private Client getOrBuildNodeClient() { } } - private Client getOrBuildTransportClient() { - synchronized (InternalTestCluster.this) { - if (closed.get()) { - throw new RuntimeException("already closed"); - } - if (transportClient == null) { - /* don't sniff client for now - doesn't work will all tests - * since it might throw NoNodeAvailableException if nodes are - * shut down. we first need support of transportClientRatio - * as annotations or so */ - transportClient = new TransportClientFactory(nodeConfigurationSource.transportClientSettings(), - baseDir, nodeConfigurationSource.transportClientPlugins()).client(node, clusterName); - } - return clientWrapper.apply(transportClient); - } - } - void resetClient() { if (closed.get() == false) { - Releasables.close(nodeClient, transportClient); + Releasables.close(nodeClient); nodeClient = null; - transportClient = null; } } @@ -1023,44 +976,9 @@ private void markNodeDataDirsAsNotEligibleForWipe(Node node) { } } - public static final String TRANSPORT_CLIENT_PREFIX = "transport_client_"; - - private static class TransportClientFactory { - private final Settings settings; - private final Path baseDir; - private final Collection> plugins; - - TransportClientFactory(Settings settings, Path baseDir, Collection> plugins) { - this.settings = settings != null ? settings : Settings.EMPTY; - this.baseDir = baseDir; - this.plugins = plugins; - } - - public Client client(Node node, String clusterName) { - TransportAddress addr = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); - Settings nodeSettings = node.settings(); - Builder builder = Settings.builder() - .put("client.transport.nodes_sampler_interval", "1s") - .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) - .put("node.name", TRANSPORT_CLIENT_PREFIX + node.settings().get("node.name")) - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName).put("client.transport.sniff", false) - .put("logger.prefix", nodeSettings.get("logger.prefix", "")) - .put("logger.level", nodeSettings.get("logger.level", "INFO")) - .put(settings); - if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings)) { - builder.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), NetworkModule.TRANSPORT_TYPE_SETTING.get(settings)); - } else { - builder.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), getTestTransportType()); - } - TransportClient client = new MockTransportClient(builder.build(), plugins); - client.addTransportAddress(addr); - return client; - } - } - @Override - public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException { - super.beforeTest(random, transportClientRatio); + public synchronized void beforeTest(Random random) throws IOException, InterruptedException { + super.beforeTest(random); reset(true); } @@ -1807,7 +1725,7 @@ private void removeExclusions(Set excludedNodeIds) { if (excludedNodeIds.isEmpty() == false) { logger.info("removing voting config exclusions for {} after restart/shutdown", excludedNodeIds); try { - Client client = getRandomNodeAndClient(node -> 
excludedNodeIds.contains(node.name) == false).client(random); + Client client = getRandomNodeAndClient(node -> excludedNodeIds.contains(node.name) == false).client(); client.execute(ClearVotingConfigExclusionsAction.INSTANCE, new ClearVotingConfigExclusionsRequest()).get(); } catch (InterruptedException | ExecutionException e) { throw new AssertionError("unexpected", e); @@ -2253,7 +2171,7 @@ public boolean hasNext() { @Override public Client next() { - return iterator.next().client(random); + return iterator.next().client(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index b5aa26a38549e..a469088bcb6c3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -52,8 +52,6 @@ public abstract class TestCluster implements Closeable { protected Random random; - protected double transportClientRatio = 0.0; - public TestCluster(long seed) { this.seed = seed; } @@ -65,10 +63,7 @@ public long seed() { /** * This method should be executed before each test to reset the cluster to its initial state. */ - public void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException { - assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0; - logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio); - this.transportClientRatio = transportClientRatio; + public void beforeTest(Random random) throws IOException, InterruptedException { this.random = new Random(random.nextLong()); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index a690e4bbbdd21..6fffd246ec245 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -192,11 +192,11 @@ public Settings transportClientSettings() { try { { Random random = new Random(seed); - cluster0.beforeTest(random, random.nextDouble()); + cluster0.beforeTest(random); } { Random random = new Random(seed); - cluster1.beforeTest(random, random.nextDouble()); + cluster1.beforeTest(random); } assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); Iterator iterator1 = cluster1.getClients().iterator(); @@ -248,7 +248,7 @@ public Settings transportClientSettings() { true, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, nodePrefix, mockPlugins(), Function.identity()); try { - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); final int originalMasterCount = cluster.numMasterNodes(); final Map shardNodePaths = new HashMap<>(); for (String name: cluster.getNodeNames()) { @@ -279,7 +279,7 @@ public Settings transportClientSettings() { Files.createDirectories(newTestMarker); final String newNode3 = cluster.startNode(poorNodeDataPathSettings); assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath)); - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned assertFileExists(stableTestMarker); // but leaving the structure 
of existing, reused nodes @@ -287,7 +287,7 @@ public Settings transportClientSettings() { assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), equalTo(shardNodePaths.get(name))); } - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes for (String name: cluster.getNodeNames()) { assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), @@ -336,7 +336,7 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } }, 0, "", mockPlugins(), Function.identity()); - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); List roles = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { final DiscoveryNode.Role role = i == numNodes - 1 && roles.contains(MASTER) == false ? @@ -426,7 +426,7 @@ public Settings transportClientSettings() { "test", nodeConfigurationSource, 0, nodePrefix, plugins, Function.identity()); try { - cluster.beforeTest(random(), 0.0); + cluster.beforeTest(random()); switch (randomInt(2)) { case 0: cluster.stopRandomDataNode(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index eba3532f063bf..fd7c8bd4086ef 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -154,7 +154,7 @@ public final void startClusters() throws Exception { InternalTestCluster leaderCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), numberOfNodesPerCluster(), "leader_cluster", createNodeConfigurationSource(null, true), 0, "leader", mockPlugins, Function.identity()); - leaderCluster.beforeTest(random(), 0.0D); + leaderCluster.beforeTest(random()); leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { ClusterService clusterService = leaderCluster.getInstance(ClusterService.class); @@ -167,7 +167,7 @@ public final void startClusters() throws Exception { mockPlugins, Function.identity()); clusterGroup = new ClusterGroup(leaderCluster, followerCluster); - followerCluster.beforeTest(random(), 0.0D); + followerCluster.beforeTest(random()); followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { ClusterService clusterService = followerCluster.getInstance(ClusterService.class); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java index 92e0ea06a30e7..3e182ef20d699 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrDisabledIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Collection; @@ -28,18 +27,8 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder().put(super.transportClientSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), 
false).build(); - } - @Override protected Collection> nodePlugins() { return Collections.singletonList(LocalStateCcr.class); } - - @Override - protected Collection> transportClientPlugins() { - return Collections.singletonList(XPackClientPlugin.class); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java index 2d4991d514027..58caf0c512b08 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java @@ -15,14 +15,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Arrays; import java.util.Collection; import java.util.concurrent.CountDownLatch; -@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class AbstractLicensesIntegrationTestCase extends ESIntegTestCase { @Override @@ -35,17 +33,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, CommonAnalysisPlugin.class); - } - - @Override - protected Settings transportClientSettings() { - // Plugin should be loaded on the transport client as well - return nodeSettings(0); - } - protected void putLicense(final License license) throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java index 00d1c47cdedaa..10a441526400d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java @@ -25,7 +25,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.nullValue; -@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0) +@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0) public class LicenseServiceClusterTests extends AbstractLicensesIntegrationTestCase { @Override @@ -50,11 +50,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class, Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - public void testClusterRestartWithLicense() throws Exception { wipeAllLicenses(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java index 1f09f959883f3..9a2cb24e48b72 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; @@ -25,7 +24,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = SUITE) public class StartBasicLicenseTests extends AbstractLicensesIntegrationTestCase { @Override @@ -46,11 +45,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); - } - public void testStartBasicLicense() throws Exception { LicensingClient licensingClient = new LicensingClient(client()); License license = TestUtils.generateSignedLicense("trial", License.VERSION_CURRENT, -1, TimeValue.timeValueHours(24)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java index eac145dd0ffa8..537df2a4a51ed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; @@ -24,7 +23,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = SUITE) public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase { @Override @@ -45,11 +44,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); - } - public void testStartTrial() throws Exception { LicensingClient licensingClient = new LicensingClient(client()); ensureStartingWithBasic(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java index b03f51d1d195b..81be978d33103 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -54,7 +54,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -@ESIntegTestCase.ClusterScope(numDataNodes = 0, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(numDataNodes = 0) public class SourceOnlySnapshotIT extends ESIntegTestCase { @Override diff --git 
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java index 673c10f885447..cc23579547ed0 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -77,7 +77,7 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNull.nullValue; -@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexLifecycleInitialisationTests extends ESIntegTestCase { private Settings settings; private LifecyclePolicy lifecyclePolicy; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index b73bb7c5ad499..aae9b0d89c2a0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -67,8 +67,7 @@ * Note for other type of integration tests you should use the external test cluster created by the Gradle integTest task. * For example tests extending this base class test with the non native autodetect process. */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, - transportClientRatio = 0, supportsDedicatedMasters = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = false) public abstract class BaseMlIntegTestCase extends ESIntegTestCase { @Override @@ -89,28 +88,12 @@ protected Settings nodeSettings(int nodeOrdinal) { return settings.build(); } - @Override - protected Settings transportClientSettings() { - Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); - settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), true); - settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); - settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); - settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); - return settings.build(); - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateMachineLearning.class, CommonAnalysisPlugin.class, ReindexPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected Collection> getMockPlugins() { return Arrays.asList(TestSeedPlugin.class, MockHttpTransport.TestPlugin.class); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java index 057c6d8f3a225..3aef6dad5889f 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; 
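// A minimal sketch of the annotation rewrite repeated across the x-pack tests below,
// using a hypothetical ExampleIT class; transportClientRatio is dropped and every other
// ClusterScope attribute is carried over verbatim:
//
//     @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0)
//     public class ExampleIT extends MonitoringIntegTestCase { }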
-@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class MultiNodesStatsTests extends MonitoringIntegTestCase { @Override diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java index 42d89608efd46..588882e91f854 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/AbstractIndicesCleanerTestCase.java @@ -21,7 +21,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; -@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0) public abstract class AbstractIndicesCleanerTestCase extends MonitoringIntegTestCase { static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("yyyy.MM.dd").withZone(ZoneOffset.UTC); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index d4ea017ca8b3d..aa58b9fa60660 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -81,7 +81,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = Scope.TEST, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class HttpExporterIT extends MonitoringIntegTestCase { private final List clusterAlertBlacklist = diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java index 2621a43ccee4d..34885563c87ac 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java @@ -40,7 +40,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = Scope.SUITE, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class HttpExporterSslIT extends MonitoringIntegTestCase { private final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index ed5d3ef40ae24..5db71f72cf6ef 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -58,7 +58,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class LocalExporterIntegTests extends LocalExporterIntegTestCase { private final String indexTimeFormat = randomFrom("yy", "yyyy", "yyyy.MM", "yyyy-MM", "MM.yyyy", "MM", null); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java index 4af080b7fabde..2604aad151cd3 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = TEST, - numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) + numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) public class LocalExporterResourceIntegTests extends LocalExporterIntegTestCase { public LocalExporterResourceIntegTests() throws Exception { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java index d716e2e479f4a..c350b9a374ab2 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java @@ -20,8 +20,6 @@ import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; @@ -63,14 +61,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder().put(super.transportClientSettings()) -// .put(XPackSettings.SECURITY_ENABLED.getKey(), false) - .put(XPackSettings.WATCHER_ENABLED.getKey(), false) - .build(); - } - @Override protected Collection> getMockPlugins() { Set> plugins = new HashSet<>(super.getMockPlugins()); @@ -85,12 +75,6 @@ protected Collection> nodePlugins() { MockIngestPlugin.class, CommonAnalysisPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, MockPainlessScriptEngine.TestPlugin.class, - MockIngestPlugin.class, CommonAnalysisPlugin.class); - } - protected MonitoringClient monitoringClient() { return randomBoolean() ? 
new XPackClient(client()).monitoring() : new MonitoringClient(client()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index 77f5b6c57b4c3..dc8f93a0cdcf7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -86,11 +86,6 @@ protected Collection> nodePlugins() { return Arrays.asList(LocalStateSecurity.class, CommonAnalysisPlugin.class, ParentJoinPlugin.class, InternalSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected String configUsers() { final String usersPasswdHashed = new String(getFastStoredHashAlgoForTests().hash(USERS_PASSWD)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 5e83ef99563d9..c5ff8242b6bec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.xpack.core.XPackSettings; @@ -63,8 +62,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -// The random usage of meta fields such as _timestamp add noise to the test, so disable random index templates: -@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public class FieldLevelSecurityTests extends SecurityIntegTestCase { protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); @@ -75,11 +72,6 @@ protected Collection> nodePlugins() { InternalSettingsPlugin.class); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - @Override protected String configUsers() { final String usersPasswHashed = new String(getFastStoredHashAlgoForTests().hash(USERS_PASSWD)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index 349bef3fc3152..87db72bcf0285 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -18,7 +18,7 @@ /** * Integration test that uses multiple data nodes to test that the shrink index api works with security. 
*/ -@ClusterScope(minNumDataNodes = 2, transportClientRatio = 0.0) +@ClusterScope(minNumDataNodes = 2) public class ShrinkIndexWithSecurityTests extends SecurityIntegTestCase { @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index d862d248976da..a3b75b188a278 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -74,7 +74,6 @@ * * @see SecuritySettingsSource */ -@ESIntegTestCase.ClusterScope(transportClientRatio = 0.0) public abstract class SecurityIntegTestCase extends ESIntegTestCase { private static SecuritySettingsSource SECURITY_DEFAULT_SETTINGS; @@ -84,7 +83,7 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase { * Settings used when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} * so that some of the configuration parameters can be overridden through test instance methods, similarly - * to how {@link #nodeSettings(int)} and {@link #transportClientSettings()} work. + * to how {@link #nodeSettings(int)} works. */ private static CustomSecuritySettingsSource customSecuritySettingsSource = null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java index c8238ab49b146..0367fc4f74be7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java @@ -5,20 +5,11 @@ */ package org.elasticsearch.transport; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; // this class sits in org.elasticsearch.transport so that TransportService.requestHandlers is visible public class SecurityServerTransportServiceTests extends SecurityIntegTestCase { - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put(super.transportClientSettings()) - .put(XPackSettings.SECURITY_ENABLED.getKey(), true) - .build(); - } public void testSecurityServerTransportServiceWrapsAllHandlers() { for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java index b04b8c8ac3d36..f6e5552ddbc53 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java @@ -32,7 +32,7 @@ * templates when started within security, as this requires certain * system privileges */ -@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, 
numClientNodes = 0) public class TemplateUpgraderTests extends SecurityIntegTestCase { public void testTemplatesWorkAsExpected() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java index 866c52989af6f..23408f5668ec9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java @@ -29,7 +29,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -@ClusterScope(scope = TEST, numDataNodes = 1, transportClientRatio = 0.0) +@ClusterScope(scope = TEST, numDataNodes = 1) public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase { private static Settings startupFilterSettings; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java index 9f0b7863d30e7..2284a920eac11 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.is; // no client nodes, no transport clients, as they all get rejected on network connections -@ClusterScope(scope = Scope.SUITE, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +@ClusterScope(scope = Scope.SUITE, numDataNodes = 0, numClientNodes = 0) public class IpFilteringIntegrationTests extends SecurityIntegTestCase { private static int randomClientPort; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java index 96922aa8822e4..65a5fb080cdb0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -21,7 +21,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; -@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1, transportClientRatio = 0.0) +@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class IpFilteringUpdateTests extends SecurityIntegTestCase { private static int randomClientPort; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java index 71b3d7a1990b4..24e6a5bca009b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java @@ -54,21 +54,6 @@ protected Settings nodeSettings(int 
nodeOrdinal) { .build(); } - @Override - protected Settings transportClientSettings() { - final Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem"); - final Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem"); - return Settings.builder() - .put(super.transportClientSettings().filter(s -> s.startsWith("xpack.security.transport.ssl") == false)) - .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.key", keyPath) - .put("xpack.security.transport.ssl.certificate", certPath) - .put("xpack.security.transport.ssl.certificate_authorities", certPath) - // disable hostname verificate since these certs aren't setup for that - .put("xpack.security.transport.ssl.verification_mode", "certificate") - .build(); - } - @Override protected boolean transportSSLEnabled() { return true; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index 9c540f559b688..e2df24643aec3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -49,7 +49,7 @@ * * @see RestrictedTrustManager */ -@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false, transportClientRatio = 0.0) +@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) @TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java index c741667ba9ebf..58af33172da6e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java @@ -16,7 +16,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; -@ESIntegTestCase.ClusterScope(scope = SUITE, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(scope = SUITE, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0) public abstract class AbstractSqlIntegTestCase extends ESIntegTestCase { @Override @@ -35,10 +35,5 @@ protected Settings nodeSettings(int nodeOrdinal) { protected Collection> nodePlugins() { return Collections.singletonList(LocalStateSQLXPackPlugin.class); } - - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java index 51be147005173..335ab8bc84928 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java @@ -31,14 +31,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder() - 
.put(super.transportClientSettings()) - .put(XPackSettings.SQL_ENABLED.getKey(), randomBoolean()) - .build(); - } - public void testSqlAction() { Throwable throwable = expectThrows(Throwable.class, () -> new SqlQueryRequestBuilder(client(), SqlQueryAction.INSTANCE).query("SHOW tables").get()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 8c44ba831b359..5b11b444db3ca 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +43,6 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; @@ -97,7 +95,7 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; -@ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 3) +@ClusterScope(scope = SUITE, numClientNodes = 0, maxNumDataNodes = 3) public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase { public static final String WATCHER_LANG = Script.DEFAULT_SCRIPT_LANG; @@ -119,15 +117,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings transportClientSettings() { - return Settings.builder() - .put("client.transport.sniff", false) - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) - .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) - .build(); - } - @Override protected Set excludeTemplates() { Set excludes = new HashSet<>(); @@ -152,11 +141,6 @@ protected Collection> nodePlugins() { return pluginTypes(); } - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - protected List> pluginTypes() { List> types = new ArrayList<>(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index b03d75af113af..318d1f1a8b1a1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.is; @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36782") -@ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) +@ClusterScope(scope = SUITE, 
numClientNodes = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) public class SingleNodeTests extends AbstractWatcherIntegrationTestCase { @Override From 9b800a5801606b0bb24313dc2a99ccb1e256ff0d Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 24 May 2019 13:39:29 -0400 Subject: [PATCH 258/321] [DOCS] Fix nested def list for Asciidoctor (#42353) --- .../settings/notification-settings.asciidoc | 69 ++++++++++--------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 400b55271f975..77f755b09e285 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -304,38 +304,39 @@ Specifies account information for sending notifications via PagerDuty. You can specify the following PagerDuty account attributes: + -- - `name`;; - A name for the PagerDuty account associated with the API key you - are using to access PagerDuty. Required. - - `secure_service_api_key` (<>);; - The https://developer.pagerduty.com/documentation/rest/authentication[ - PagerDuty API key] to use to access PagerDuty. Required. - - - `event_defaults`;; - Default values for {xpack-ref}/actions-pagerduty.html#pagerduty-event-trigger-incident-attributes[ - PagerDuty event attributes]. Optional. - + - `description`:: - A string that contains the default description for PagerDuty events. - If no default is configured, each PagerDuty action must specify a - `description`. - + - `incident_key`:: - A string that contains the default incident key to use when sending - PagerDuty events. - + - `client`:: - A string that specifies the default monitoring client. - + - `client_url`:: - The URL of the default monitoring client. - + - `event_type`:: - The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. - + - `attach_payload`:: - Whether or not to provide the watch payload as context for - the event by default. Valid values: `true`, `false`. +`name`;; +A name for the PagerDuty account associated with the API key you +are using to access PagerDuty. Required. + +`secure_service_api_key` (<>);; +The https://developer.pagerduty.com/documentation/rest/authentication[ +PagerDuty API key] to use to access PagerDuty. Required. +-- ++ +`event_defaults`;; +Default values for {xpack-ref}/actions-pagerduty.html#pagerduty-event-trigger-incident-attributes[ +PagerDuty event attributes]. Optional. ++ +-- +`description`:: +A string that contains the default description for PagerDuty events. +If no default is configured, each PagerDuty action must specify a +`description`. + +`incident_key`:: +A string that contains the default incident key to use when sending +PagerDuty events. + +`client`:: +A string that specifies the default monitoring client. + +`client_url`:: +The URL of the default monitoring client. + +`event_type`:: +The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. + +`attach_payload`:: +Whether or not to provide the watch payload as context for +the event by default. Valid values: `true`, `false`. 
-- \ No newline at end of file From 37be0a164f006d074d4675abc55b215828f27fa8 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 24 May 2019 18:44:51 +0100 Subject: [PATCH 259/321] [DOCS] Adding ML-specific prerequisites to setup docs (#42529) --- docs/reference/settings/ml-settings.asciidoc | 5 +++++ docs/reference/setup/install/windows.asciidoc | 8 ++++++++ docs/reference/setup/install/zip-windows.asciidoc | 8 ++++++++ 3 files changed, 21 insertions(+) diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index dbc11223f40ce..09fb8adad8523 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -8,6 +8,11 @@ You do not need to configure any settings to use {ml}. It is enabled by default. +IMPORTANT: {ml-cap} uses SSE4.2 instructions, so will only work on machines whose +CPUs https://en.wikipedia.org/wiki/SSE4#Supporting_CPUs[support] SSE4.2. If you +run {es} on older hardware you must disable {ml} (by setting `xpack.ml.enabled` +to `false`). + All of these settings can be added to the `elasticsearch.yml` configuration file. The dynamic settings can also be updated across a cluster with the <>. diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 83d1251148c4a..e53e8d4122070 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -12,6 +12,14 @@ You can continue using the `.zip` approach if you prefer. include::license.asciidoc[] +NOTE: On Windows the Elasticsearch {ml} feature requires the Microsoft Universal +C Runtime library. This is built into Windows 10, Windows Server 2016 and more +recent versions of Windows. For older versions of Windows it can be installed +via Windows Update, or from a +https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows[separate download]. +If you cannot install the Microsoft Universal C Runtime library you can still +use the rest of Elasticsearch if you disable the {ml} feature. + The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index efed4b613c54b..669e3c72ea8b8 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -11,6 +11,14 @@ experience for Windows. You can continue using the `.zip` approach if you prefer include::license.asciidoc[] +NOTE: On Windows the Elasticsearch {ml} feature requires the Microsoft Universal +C Runtime library. This is built into Windows 10, Windows Server 2016 and more +recent versions of Windows. For older versions of Windows it can be installed +via Windows Update, or from a +https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows[separate download]. +If you cannot install the Microsoft Universal C Runtime library you can still +use the rest of Elasticsearch if you disable the {ml} feature. + The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. 
Other versions can be found on the From e87c8b22c6f2e256a72e417e1436b12be2ccab40 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 24 May 2019 10:53:24 -0700 Subject: [PATCH 260/321] Fix compilation This test was added while a PR removing transportClientRatio was in flight. --- .../cluster/state/TransportClusterStateActionDisruptionIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index 0d51f647ee28c..3b2b7d997d708 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; -@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST, transportClientRatio = 0) +@ESIntegTestCase.ClusterScope(numDataNodes = 0, scope = ESIntegTestCase.Scope.TEST) public class TransportClusterStateActionDisruptionIT extends ESIntegTestCase { @Override From 44c15512ffc92c785874ab44876f0ec79056d523 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Fri, 24 May 2019 19:54:06 +0200 Subject: [PATCH 261/321] [ML-DataFrame] add support for fixed_interval, calendar_interval, remove interval (#42427) * add support for fixed_interval, calendar_interval, remove interval * adapt HLRC * checkstyle * add an HLRC-to-server test * adapt yml test * improve naming and doc * improve interface and add test code for HLRC to server * address review comments * repair merge conflict * fix date patterns * address review comments * remove assert for warning * improve exception message * use constants --- .../pivot/DateHistogramGroupSource.java | 231 ++++++++++++----- .../pivot/DateHistogramGroupSourceTests.java | 17 +- .../hlrc/DateHistogramGroupSourceTests.java | 79 ++++++ .../pivot/DateHistogramGroupSource.java | 235 ++++++++++++++---- .../pivot/DateHistogramGroupSourceTests.java | 10 +- .../integration/DataFrameIntegTestCase.java | 25 +- .../integration/DataFrameTransformIT.java | 8 +- .../integration/DataFramePivotRestIT.java | 19 +- .../test/data_frame/preview_transforms.yml | 15 +- 9 files changed, 476 insertions(+), 163 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java index 71e7e258c5c8b..d880bfd82140b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -20,9 +20,9 @@ package org.elasticsearch.client.dataframe.transforms.pivot; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,7 +31,11 @@ import java.io.IOException; import java.time.ZoneId; import java.time.ZoneOffset; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -43,32 +47,164 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXCo private static final ParseField TIME_ZONE = new ParseField("time_zone"); private static final ParseField FORMAT = new ParseField("format"); + // From DateHistogramAggregationBuilder in core, transplanted and modified to a set + // so we don't need to import a dependency on the class + private static final Set DATE_FIELD_UNITS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + "year", + "1y", + "quarter", + "1q", + "month", + "1M", + "week", + "1w", + "day", + "1d", + "hour", + "1h", + "minute", + "1m", + "second", + "1s"))); + + /** + * Interval can be specified in 2 ways: + * + * fixed_interval fixed intervals like 1h, 1m, 1d + * calendar_interval calendar aware intervals like 1M, 1Y, ... + * + * Note: data frames do not support the deprecated interval option + */ + public interface Interval extends ToXContentFragment { + String getName(); + DateHistogramInterval getInterval(); + } + + public static class FixedInterval implements Interval { + private static final String NAME = "fixed_interval"; + private final DateHistogramInterval interval; + + public FixedInterval(DateHistogramInterval interval) { + this.interval = interval; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final FixedInterval that = (FixedInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + + public static class CalendarInterval implements Interval { + private static final String NAME = "calendar_interval"; + private final DateHistogramInterval interval; + + public CalendarInterval(DateHistogramInterval interval) { + this.interval = interval; + if (DATE_FIELD_UNITS.contains(interval.toString()) == false) { + throw new IllegalArgumentException("The supplied interval [" + interval + "] could not be parsed " + + "as a calendar interval."); + } + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final CalendarInterval that = (CalendarInterval) other; + return Objects.equals(this.interval, 
that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("date_histogram_group_source", true, (args) -> { String field = (String)args[0]; - long interval = 0; - DateHistogramInterval dateHistogramInterval = null; - if (args[1] instanceof Long) { - interval = (Long)args[1]; + String fixedInterval = (String) args[1]; + String calendarInterval = (String) args[2]; + + Interval interval = null; + + if (fixedInterval != null && calendarInterval != null) { + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found both"); + } else if (fixedInterval != null) { + interval = new FixedInterval(new DateHistogramInterval(fixedInterval)); + } else if (calendarInterval != null) { + interval = new CalendarInterval(new DateHistogramInterval(calendarInterval)); } else { - dateHistogramInterval = (DateHistogramInterval) args[1]; + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found none"); } - ZoneId zoneId = (ZoneId) args[2]; - String format = (String) args[3]; - return new DateHistogramGroupSource(field, interval, dateHistogramInterval, format, zoneId); + + ZoneId zoneId = (ZoneId) args[3]; + String format = (String) args[4]; + return new DateHistogramGroupSource(field, interval, format, zoneId); }); static { PARSER.declareString(optionalConstructorArg(), FIELD); - PARSER.declareField(optionalConstructorArg(), p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return p.longValue(); - } else { - return new DateHistogramInterval(p.text()); - } - }, HistogramGroupSource.INTERVAL, ObjectParser.ValueType.LONG); + + PARSER.declareString(optionalConstructorArg(), new ParseField(FixedInterval.NAME)); + PARSER.declareString(optionalConstructorArg(), new ParseField(CalendarInterval.NAME)); + PARSER.declareField(optionalConstructorArg(), p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { return ZoneId.of(p.text()); @@ -84,15 +220,13 @@ public static DateHistogramGroupSource fromXContent(final XContentParser parser) return PARSER.apply(parser, null); } - private final long interval; - private final DateHistogramInterval dateHistogramInterval; + private final Interval interval; private final String format; private final ZoneId timeZone; - DateHistogramGroupSource(String field, long interval, DateHistogramInterval dateHistogramInterval, String format, ZoneId timeZone) { + DateHistogramGroupSource(String field, Interval interval, String format, ZoneId timeZone) { super(field); this.interval = interval; - this.dateHistogramInterval = dateHistogramInterval; this.format = format; this.timeZone = timeZone; } @@ -102,14 +236,10 @@ public Type getType() { return Type.DATE_HISTOGRAM; } - public long getInterval() { + public Interval getInterval() { return interval; } - public DateHistogramInterval getDateHistogramInterval() { - return dateHistogramInterval; - } - public String getFormat() { return format; } @@ -124,11 +254,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (field != null) { builder.field(FIELD.getPreferredName(), field); } - if (dateHistogramInterval == null) { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), interval); - } else { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), dateHistogramInterval.toString()); - } + interval.toXContent(builder, params); if (timeZone != null) 
{ builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } @@ -152,15 +278,14 @@ public boolean equals(Object other) { final DateHistogramGroupSource that = (DateHistogramGroupSource) other; return Objects.equals(this.field, that.field) && - Objects.equals(interval, that.interval) && - Objects.equals(dateHistogramInterval, that.dateHistogramInterval) && - Objects.equals(timeZone, that.timeZone) && - Objects.equals(format, that.format); + Objects.equals(this.interval, that.interval) && + Objects.equals(this.timeZone, that.timeZone) && + Objects.equals(this.format, that.format); } @Override public int hashCode() { - return Objects.hash(field, interval, dateHistogramInterval, timeZone, format); + return Objects.hash(field, interval, timeZone, format); } public static Builder builder() { @@ -170,8 +295,7 @@ public static Builder builder() { public static class Builder { private String field; - private long interval = 0; - private DateHistogramInterval dateHistogramInterval; + private Interval interval; private String format; private ZoneId timeZone; @@ -187,41 +311,14 @@ public Builder setField(String field) { /** * Set the interval for the DateHistogram grouping - * @param interval the time interval in milliseconds + * @param interval a fixed or calendar interval * @return the {@link Builder} with the interval set. */ - public Builder setInterval(long interval) { - if (interval < 1) { - throw new IllegalArgumentException("[interval] must be greater than or equal to 1."); - } + public Builder setInterval(Interval interval) { this.interval = interval; return this; } - /** - * Set the interval for the DateHistogram grouping - * @param timeValue The time value to use as the interval - * @return the {@link Builder} with the interval set. - */ - public Builder setInterval(TimeValue timeValue) { - return setInterval(timeValue.getMillis()); - } - - /** - * Sets the interval of the DateHistogram grouping - * - * If this DateHistogramInterval is set, it supersedes the #{@link DateHistogramGroupSource#getInterval()} - * @param dateHistogramInterval the DateHistogramInterval to set - * @return The {@link Builder} with the dateHistogramInterval set. - */ - public Builder setDateHistgramInterval(DateHistogramInterval dateHistogramInterval) { - if (dateHistogramInterval == null) { - throw new IllegalArgumentException("[dateHistogramInterval] must not be null"); - } - this.dateHistogramInterval = dateHistogramInterval; - return this; - } - /** * Set the optional String formatting for the time interval. 
* @param format The format of the output for the time interval key @@ -243,7 +340,7 @@ public Builder setTimeZone(ZoneId timeZone) { } public DateHistogramGroupSource build() { - return new DateHistogramGroupSource(field, interval, dateHistogramInterval, format, timeZone); + return new DateHistogramGroupSource(field, interval, format, timeZone); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index c6a160d9b8b8d..32605f5c286ad 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -27,15 +27,20 @@ public class DateHistogramGroupSourceTests extends AbstractXContentTestCase { + public static DateHistogramGroupSource.Interval randomDateHistogramInterval() { + if (randomBoolean()) { + return new DateHistogramGroupSource.FixedInterval(new DateHistogramInterval(randomPositiveTimeValue())); + } else { + return new DateHistogramGroupSource.CalendarInterval(new DateHistogramInterval(randomTimeValue(1, 1, "m", "h", "d", "w"))); + } + } + public static DateHistogramGroupSource randomDateHistogramGroupSource() { String field = randomAlphaOfLengthBetween(1, 20); - boolean setInterval = randomBoolean(); return new DateHistogramGroupSource(field, - setInterval ? randomLongBetween(1, 10_000) : 0, - setInterval ? null : randomFrom(DateHistogramInterval.days(10), - DateHistogramInterval.minutes(1), DateHistogramInterval.weeks(1)), - randomBoolean() ? randomAlphaOfLength(10) : null, - randomBoolean() ? randomZone() : null); + randomDateHistogramInterval(), + randomBoolean() ? randomAlphaOfLength(10) : null, + randomBoolean() ? randomZone() : null); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java new file mode 100644 index 0000000000000..dc31004607dcd --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms.pivot.hlrc; + +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; + +import static org.hamcrest.Matchers.equalTo; + +public class DateHistogramGroupSourceTests extends AbstractResponseTestCase< + DateHistogramGroupSource, + org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource> { + + public static DateHistogramGroupSource randomDateHistogramGroupSource() { + String field = randomAlphaOfLengthBetween(1, 20); + DateHistogramGroupSource dateHistogramGroupSource; + if (randomBoolean()) { + dateHistogramGroupSource = new DateHistogramGroupSource(field, new DateHistogramGroupSource.FixedInterval( + new DateHistogramInterval(randomPositiveTimeValue()))); + } else { + dateHistogramGroupSource = new DateHistogramGroupSource(field, new DateHistogramGroupSource.CalendarInterval( + new DateHistogramInterval(randomTimeValue(1, 1, "m", "h", "d", "w")))); + } + + if (randomBoolean()) { + dateHistogramGroupSource.setTimeZone(randomZone()); + } + if (randomBoolean()) { + dateHistogramGroupSource.setFormat(randomAlphaOfLength(10)); + } + return dateHistogramGroupSource; + } + + @Override + protected DateHistogramGroupSource createServerTestInstance() { + return randomDateHistogramGroupSource(); + } + + @Override + protected org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource doParseToClientInstance(XContentParser parser) { + return org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource.fromXContent(parser); + } + + @Override + protected void assertInstances(DateHistogramGroupSource serverTestInstance, + org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource clientInstance) { + assertThat(serverTestInstance.getField(), equalTo(clientInstance.getField())); + assertThat(serverTestInstance.getFormat(), equalTo(clientInstance.getFormat())); + assertSameInterval(serverTestInstance.getInterval(), clientInstance.getInterval()); + assertThat(serverTestInstance.getTimeZone(), equalTo(clientInstance.getTimeZone())); + assertThat(serverTestInstance.getType().name(), equalTo(clientInstance.getType().name())); + } + + private void assertSameInterval(DateHistogramGroupSource.Interval serverTestInstance, + org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource.Interval clientInstance) { + assertEquals(serverTestInstance.getName(), clientInstance.getName()); + assertEquals(serverTestInstance.getInterval(), clientInstance.getInterval()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java index f4bf094235ae4..a3861ef65f648 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -8,10 +8,13 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import java.io.IOException; @@ -19,28 +22,186 @@ import java.time.ZoneOffset; import java.util.Objects; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class DateHistogramGroupSource extends SingleGroupSource { + private static final int CALENDAR_INTERVAL_ID = 1; + private static final int FIXED_INTERVAL_ID = 0; + + /** + * Interval can be specified in 2 ways: + * + * fixed_interval fixed intervals like 1h, 1m, 1d + * calendar_interval calendar aware intervals like 1M, 1Y, ... + * + * Note: data frames do not support the deprecated interval option + */ + public interface Interval extends Writeable, ToXContentFragment { + String getName(); + DateHistogramInterval getInterval(); + byte getIntervalTypeId(); + } + + public static class FixedInterval implements Interval { + private static final String NAME = "fixed_interval"; + private final DateHistogramInterval interval; + + public FixedInterval(DateHistogramInterval interval) { + this.interval = interval; + } + + public FixedInterval(StreamInput in) throws IOException { + this.interval = new DateHistogramInterval(in); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public byte getIntervalTypeId() { + return FIXED_INTERVAL_ID; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + interval.toXContent(builder, params); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + interval.writeTo(out); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final FixedInterval that = (FixedInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + + public static class CalendarInterval implements Interval { + private static final String NAME = "calendar_interval"; + private final DateHistogramInterval interval; + + public CalendarInterval(DateHistogramInterval interval) { + this.interval = interval; + if (DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()) == null) { + throw new IllegalArgumentException("The supplied interval [" + interval + "] could not be parsed " + + "as a calendar interval."); + } + } + + public CalendarInterval(StreamInput in) throws IOException { + this.interval = new DateHistogramInterval(in); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public DateHistogramInterval getInterval() { + return interval; + } + + @Override + public byte getIntervalTypeId() { + return CALENDAR_INTERVAL_ID; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME); + 
interval.toXContent(builder, params); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + interval.writeTo(out); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final CalendarInterval that = (CalendarInterval) other; + return Objects.equals(this.interval, that.interval); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + } + + private Interval readInterval(StreamInput in) throws IOException { + byte id = in.readByte(); + switch (id) { + case FIXED_INTERVAL_ID: + return new FixedInterval(in); + case CALENDAR_INTERVAL_ID: + return new CalendarInterval(in); + default: + throw new IllegalArgumentException("unknown interval type [" + id + "]"); + } + } + + private void writeInterval(Interval interval, StreamOutput out) throws IOException { + out.write(interval.getIntervalTypeId()); + interval.writeTo(out); + } + private static final String NAME = "data_frame_date_histogram_group"; private static final ParseField TIME_ZONE = new ParseField("time_zone"); private static final ParseField FORMAT = new ParseField("format"); private static final ConstructingObjectParser STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); - private long interval = 0; - private DateHistogramInterval dateHistogramInterval; + + private final Interval interval; private String format; private ZoneId timeZone; - public DateHistogramGroupSource(String field) { + public DateHistogramGroupSource(String field, Interval interval) { super(field); + this.interval = interval; } public DateHistogramGroupSource(StreamInput in) throws IOException { super(in); - this.interval = in.readLong(); - this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + this.interval = readInterval(in); this.timeZone = in.readOptionalZoneId(); this.format = in.readOptionalString(); } @@ -48,24 +209,28 @@ public DateHistogramGroupSource(StreamInput in) throws IOException { private static ConstructingObjectParser createParser(boolean lenient) { ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, (args) -> { String field = (String) args[0]; - return new DateHistogramGroupSource(field); - }); + String fixedInterval = (String) args[1]; + String calendarInterval = (String) args[2]; - declareValuesSourceFields(parser); + Interval interval = null; - parser.declareField((histogram, interval) -> { - if (interval instanceof Long) { - histogram.setInterval((long) interval); + if (fixedInterval != null && calendarInterval != null) { + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found both"); + } else if (fixedInterval != null) { + interval = new FixedInterval(new DateHistogramInterval(fixedInterval)); + } else if (calendarInterval != null) { + interval = new CalendarInterval(new DateHistogramInterval(calendarInterval)); } else { - histogram.setDateHistogramInterval((DateHistogramInterval) interval); + throw new IllegalArgumentException("You must specify either fixed_interval or calendar_interval, found none"); } - }, p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return p.longValue(); - } else { - return new DateHistogramInterval(p.text()); - } - }, HistogramGroupSource.INTERVAL, ObjectParser.ValueType.LONG); + + return new DateHistogramGroupSource(field, 
interval); + }); + + declareValuesSourceFields(parser); + + parser.declareString(optionalConstructorArg(), new ParseField(FixedInterval.NAME)); + parser.declareString(optionalConstructorArg(), new ParseField(CalendarInterval.NAME)); parser.declareField(DateHistogramGroupSource::setTimeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { @@ -88,28 +253,10 @@ public Type getType() { return Type.DATE_HISTOGRAM; } - public long getInterval() { + public Interval getInterval() { return interval; } - public void setInterval(long interval) { - if (interval < 1) { - throw new IllegalArgumentException("[interval] must be greater than or equal to 1."); - } - this.interval = interval; - } - - public DateHistogramInterval getDateHistogramInterval() { - return dateHistogramInterval; - } - - public void setDateHistogramInterval(DateHistogramInterval dateHistogramInterval) { - if (dateHistogramInterval == null) { - throw new IllegalArgumentException("[dateHistogramInterval] must not be null"); - } - this.dateHistogramInterval = dateHistogramInterval; - } - public String getFormat() { return format; } @@ -129,8 +276,7 @@ public void setTimeZone(ZoneId timeZone) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(field); - out.writeLong(interval); - out.writeOptionalWriteable(dateHistogramInterval); + writeInterval(interval, out); out.writeOptionalZoneId(timeZone); out.writeOptionalString(format); } @@ -141,11 +287,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (field != null) { builder.field(FIELD.getPreferredName(), field); } - if (dateHistogramInterval == null) { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), interval); - } else { - builder.field(HistogramGroupSource.INTERVAL.getPreferredName(), dateHistogramInterval.toString()); - } + interval.toXContent(builder, params); if (timeZone != null) { builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } @@ -170,13 +312,12 @@ public boolean equals(Object other) { return Objects.equals(this.field, that.field) && Objects.equals(interval, that.interval) && - Objects.equals(dateHistogramInterval, that.dateHistogramInterval) && Objects.equals(timeZone, that.timeZone) && Objects.equals(format, that.format); } @Override public int hashCode() { - return Objects.hash(field, interval, dateHistogramInterval, timeZone, format); + return Objects.hash(field, interval, timeZone, format); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index e9d989d5e5f38..7ce0374331323 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -17,13 +17,15 @@ public class DateHistogramGroupSourceTests extends AbstractSerializingTestCase groups = new HashMap<>(); - groups.put("by-day", createDateHistogramGroupSource("timestamp", DateHistogramInterval.DAY, null, null)); + groups.put("by-day", createDateHistogramGroupSourceWithCalendarInterval("timestamp", DateHistogramInterval.DAY, null, null)); groups.put("by-user", TermsGroupSource.builder().setField("user_id").build()); groups.put("by-business", 
TermsGroupSource.builder().setField("business_id").build()); @@ -48,10 +48,8 @@ public void testDataFrameTransformCrud() throws Exception { "reviews-by-user-business-day", REVIEWS_INDEX_NAME); - final RequestOptions options = - expectWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); - assertTrue(putDataFrameTransform(config, options).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId(), options).isStarted()); + assertTrue(putDataFrameTransform(config, RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isStarted()); waitUntilCheckpoint(config.getId(), 1L); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 770eaec7bd141..22586a7b37d27 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -216,7 +216,7 @@ public void testDateHistogramPivot() throws Exception { + " \"group_by\": {" + " \"by_hr\": {" + " \"date_histogram\": {" - + " \"interval\": \"1h\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD_HH\"" + + " \"fixed_interval\": \"1h\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd_HH\"" + " } } }," + " \"aggregations\": {" + " \"avg_rating\": {" @@ -226,14 +226,11 @@ public void testDateHistogramPivot() throws Exception { + "}"; createDataframeTransformRequest.setJsonEntity(config); - createDataframeTransformRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + - "use [fixed_interval] or [calendar_interval] in the future.")); Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS, - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); assertTrue(indexExists(dataFrameIndex)); Map indexStats = getAsMap(dataFrameIndex + "/_stats"); @@ -253,7 +250,7 @@ public void testPreviewTransform() throws Exception { config += " \"pivot\": {" + " \"group_by\": {" + " \"reviewer\": {\"terms\": { \"field\": \"user_id\" }}," - + " \"by_day\": {\"date_histogram\": {\"interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD\"}}}," + + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + " \"aggregations\": {" + " \"avg_rating\": {" + " \"avg\": {" @@ -261,8 +258,6 @@ public void testPreviewTransform() throws Exception { + " } } } }" + "}"; createPreviewRequest.setJsonEntity(config); - createPreviewRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + - "use [fixed_interval] or [calendar_interval] in the future.")); Map previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); List> preview = 
(List>)previewDataframeResponse.get("preview"); @@ -290,7 +285,7 @@ public void testPivotWithMaxOnDateField() throws Exception { config +=" \"pivot\": { \n" + " \"group_by\": {\n" + " \"by_day\": {\"date_histogram\": {\n" + - " \"interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD\"\n" + + " \"fixed_interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"\n" + " }}\n" + " },\n" + " \n" + @@ -305,13 +300,11 @@ public void testPivotWithMaxOnDateField() throws Exception { + "}"; createDataframeTransformRequest.setJsonEntity(config); - createDataframeTransformRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + - "use [fixed_interval] or [calendar_interval] in the future.")); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS, - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); assertTrue(indexExists(dataFrameIndex)); // we expect 21 documents as there shall be 21 days worth of docs diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 1d4a190b24e14..5e58048b3bf0f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -67,12 +67,7 @@ setup: --- "Test preview transform": - - skip: - reason: date histo interval is deprecated - features: "warnings" - do: - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." 
data_frame.preview_data_frame_transform: body: > { @@ -80,7 +75,7 @@ setup: "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}, - "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "by-hour": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-dd HH"}}}, "aggs": { "avg_response": {"avg": {"field": "responsetime"}}, "time.max": {"max": {"field": "time"}}, @@ -89,17 +84,17 @@ setup: } } - match: { preview.0.airline: foo } - - match: { preview.0.by-hour: "2017-02-49 00" } + - match: { preview.0.by-hour: "2017-02-18 00" } - match: { preview.0.avg_response: 1.0 } - match: { preview.0.time.max: "2017-02-18T00:30:00.000Z" } - match: { preview.0.time.min: "2017-02-18T00:00:00.000Z" } - match: { preview.1.airline: bar } - - match: { preview.1.by-hour: "2017-02-49 01" } + - match: { preview.1.by-hour: "2017-02-18 01" } - match: { preview.1.avg_response: 42.0 } - match: { preview.1.time.max: "2017-02-18T01:00:00.000Z" } - match: { preview.1.time.min: "2017-02-18T01:00:00.000Z" } - match: { preview.2.airline: foo } - - match: { preview.2.by-hour: "2017-02-49 01" } + - match: { preview.2.by-hour: "2017-02-18 01" } - match: { preview.2.avg_response: 42.0 } - match: { preview.2.time.max: "2017-02-18T01:01:00.000Z" } - match: { preview.2.time.min: "2017-02-18T01:01:00.000Z" } @@ -128,7 +123,7 @@ setup: "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}, - "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "by-hour": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-dd HH"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } From 8f030336353dc76e83ff6ddd069cae05f8a2e8d5 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Fri, 24 May 2019 15:03:11 -0400 Subject: [PATCH 262/321] [DOCS] Move callouts to end of line for Asciidoctor migration (#42356) --- docs/reference/sql/functions/geo.asciidoc | 29 +++++++++++++++++------ 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index 112ddfffce6ed..72f69af85529f 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -27,7 +27,9 @@ interchangeably with the following exceptions: .Synopsis: [source, sql] -------------------------------------------------- -ST_AsWKT(geometry<1>) +ST_AsWKT( + geometry <1> +) -------------------------------------------------- *Input*: @@ -52,7 +54,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] .Synopsis: [source, sql] -------------------------------------------------- -ST_WKTToSQL(string<1>) +ST_WKTToSQL( + string <1> +) -------------------------------------------------- *Input*: @@ -78,7 +82,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt] .Synopsis: [source, sql] -------------------------------------------------- -ST_GeometryType(geometry<1>) +ST_GeometryType( + geometry <1> +) -------------------------------------------------- *Input*: @@ -102,7 +108,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[geometrytype] .Synopsis: [source, sql] -------------------------------------------------- -ST_X(geometry<1>) +ST_X( + geometry <1> +) -------------------------------------------------- *Input*: @@ -126,7 +134,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[x] .Synopsis: [source, sql] -------------------------------------------------- -ST_Y(geometry<1>) +ST_Y( + geometry <1> 
+) -------------------------------------------------- *Input*: @@ -150,7 +160,9 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[y] .Synopsis: [source, sql] -------------------------------------------------- -ST_Z(geometry<1>) +ST_Z( + geometry <1> +) -------------------------------------------------- *Input*: @@ -174,7 +186,10 @@ include-tagged::{sql-specs}/docs/geo.csv-spec[z] .Synopsis: [source, sql] -------------------------------------------------- -ST_Distance(geometry<1>, geometry<2>) +ST_Distance( + geometry, <1> + geometry <2> +) -------------------------------------------------- *Input*: From 5eb38ec51782651a2ed88f96d514ce6b3f253bb7 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 24 May 2019 20:04:05 +0100 Subject: [PATCH 263/321] [ML] Fix possible race condition when closing an opening job (#42506) This change fixes a race condition that would result in an in-memory data structure becoming out-of-sync with persistent tasks in cluster state. If repeated often enough this could result in it being impossible to open any ML jobs on the affected node, as the master node would think the node had capacity to open another job but the chosen node would error during the open sequence due to its in-memory data structure being full. The race could be triggered by opening a job and then closing it a tiny fraction of a second later. It is unlikely a user of the UI could open and close the job that fast, but a script or program calling the REST API could. The nasty thing is, from the externally observable states and stats everything would appear to be fine - the fast open then close sequence would appear to leave the job in the closed state. It's only later that the leftovers in the in-memory data structure might build up and cause a problem. --- .../autodetect/AutodetectProcessManager.java | 34 +++++++++++-------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 0fd8d1b5b7411..cbcaf54b46b9e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -401,16 +401,12 @@ protected void doRun() { logger.debug("Aborted opening job [{}] as it has been closed", jobId); return; } - if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { - logger.debug("Cannot open job [{}] when its state is [{}]", - jobId, processContext.getState().getClass().getName()); - return; - } try { - createProcessAndSetRunning(processContext, job, params, closeHandler); - processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot()); - setJobState(jobTask, JobState.OPENED); + if (createProcessAndSetRunning(processContext, job, params, closeHandler)) { + processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot()); + setJobState(jobTask, JobState.OPENED); + } } catch (Exception e1) { // No need to log here as the persistent task framework will log it try { @@ -447,19 +443,25 @@ protected void doRun() { ElasticsearchMappings::resultsMapping, client, clusterState, resultsMappingUpdateHandler); } - private void createProcessAndSetRunning(ProcessContext processContext, - Job job, - AutodetectParams params, - BiConsumer handler) throws 
IOException { + private boolean createProcessAndSetRunning(ProcessContext processContext, + Job job, + AutodetectParams params, + BiConsumer handler) throws IOException { // At this point we lock the process context until the process has been started. // The reason behind this is to ensure closing the job does not happen before // the process is started as that can result to the job getting seemingly closed // but the actual process is hanging alive. processContext.tryLock(); try { + if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { + logger.debug("Cannot open job [{}] when its state is [{}]", + job.getId(), processContext.getState().getClass().getName()); + return false; + } AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler); communicator.writeHeader(); processContext.setRunning(communicator); + return true; } finally { // Now that the process is running and we have updated its state we can unlock. // It is important to unlock before we initialize the communicator (ie. load the model state) @@ -592,6 +594,8 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { try { if (processContext.setDying() == false) { logger.debug("Cannot close job [{}] as it has been marked as dying", jobId); + // The only way we can get here is if 2 close requests are made very close together. + // The other close has done the work so it's safe to return here without doing anything. return; } @@ -605,10 +609,10 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { if (communicator == null) { logger.debug("Job [{}] is being closed before its process is started", jobId); jobTask.markAsCompleted(); - return; + } else { + communicator.close(restart, reason); } - communicator.close(restart, reason); processByAllocation.remove(allocationId); } catch (Exception e) { // If the close failed because the process has explicitly been killed by us then just pass on that exception @@ -628,7 +632,7 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { try { nativeStorageProvider.cleanupLocalTmpStorage(jobTask.getDescription()); } catch (IOException e) { - logger.error(new ParameterizedMessage("[{}]Failed to delete temporary files", jobId), e); + logger.error(new ParameterizedMessage("[{}] Failed to delete temporary files", jobId), e); } } From 5720a329ad9bd38fa0ce330b1642d0ffe42538ea Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 24 May 2019 20:05:15 +0100 Subject: [PATCH 264/321] [ML] Use map and filter instead of flatMap in find_file_structure (#42534) Using map and filter avoids the garbage from all the Stream.of calls that flatMap necessitated. Performance is better when there are masses of fields. 
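For illustration, here is a minimal, self-contained sketch of the pattern this commit applies (this is not the actual Elasticsearch code; the sample records and the "message" field name are invented for the example):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class MapFilterVsFlatMap {
        public static void main(String[] args) {
            // Hypothetical sample data: the second record has no value for the field.
            String fieldName = "message";
            List<Map<String, Object>> sampleRecords = Arrays.asList(
                    Collections.<String, Object>singletonMap(fieldName, "a"),
                    Collections.<String, Object>emptyMap());

            // Before: flatMap wraps every non-null value in a one-element
            // Stream.of(...), allocating a throwaway stream per field value.
            List<Object> viaFlatMap = sampleRecords.stream().flatMap(record -> {
                Object fieldValue = record.get(fieldName);
                return (fieldValue == null) ? Stream.empty() : Stream.of(fieldValue);
            }).collect(Collectors.toList());

            // After: map + filter produces the same list without the
            // per-element stream allocations.
            List<Object> viaMapFilter = sampleRecords.stream()
                    .map(record -> record.get(fieldName))
                    .filter(fieldValue -> fieldValue != null)
                    .collect(Collectors.toList());

            System.out.println(viaFlatMap.equals(viaMapFilter)); // prints: true
        }
    }

Both pipelines drop records where the field is absent; the difference is purely in allocation behavior, which matters when this runs once per field over many sampled records.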
--- .../xpack/ml/filestructurefinder/FileStructureUtils.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java index 24a29f11e4355..90cc74c8d259c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtils.java @@ -187,11 +187,8 @@ static Tuple, SortedMap> guessMapp for (String fieldName : uniqueFieldNames) { - List fieldValues = sampleRecords.stream().flatMap(record -> { - Object fieldValue = record.get(fieldName); - return (fieldValue == null) ? Stream.empty() : Stream.of(fieldValue); - } - ).collect(Collectors.toList()); + List fieldValues = sampleRecords.stream().map(record -> record.get(fieldName)).filter(fieldValue -> fieldValue != null) + .collect(Collectors.toList()); Tuple, FieldStats> mappingAndFieldStats = guessMappingAndCalculateFieldStats(explanation, fieldName, fieldValues, timeoutChecker); From eda3da31dace56623e84de78df183483ee2c8309 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 12:10:32 -0700 Subject: [PATCH 265/321] Improve build configuration time (#41392) This commit moves the expensive configuration-time calculation of Java runtime version information to runtime instead and also makes that work cacheable. This roughly equates to about a 50% reduction in project configuration time. --- build.gradle | 9 +- buildSrc/build.gradle | 34 +- .../elasticsearch/gradle/BuildPlugin.groovy | 737 +++++++----------- .../gradle/plugin/PluginBuildPlugin.groovy | 5 +- .../gradle/precommit/PrecommitTasks.groovy | 31 +- .../gradle/test/ClusterFormationTasks.groovy | 27 +- .../gradle/test/RestIntegTestTask.groovy | 41 +- .../test/StandaloneRestTestPlugin.groovy | 8 +- .../elasticsearch/gradle/JdkJarHellCheck.java | 0 .../gradle/LazyFileOutputStream.java | 0 .../org/elasticsearch/gradle/LoggedExec.java | 0 .../org/elasticsearch/gradle/Version.java | 0 .../gradle/VersionProperties.java | 0 .../info/GenerateGlobalBuildInfoTask.java | 276 +++++++ .../gradle/info/GlobalBuildInfoPlugin.java | 198 +++++ .../gradle/info/GlobalInfoExtension.java | 12 + .../elasticsearch/gradle/info/JavaHome.java | 35 + .../gradle/info/PrintGlobalBuildInfoTask.java | 84 ++ .../gradle/precommit/ThirdPartyAuditTask.java | 11 +- ...elasticsearch.global-build-info.properties | 1 + distribution/tools/plugin-cli/build.gradle | 6 +- modules/transport-netty4/build.gradle | 14 +- plugins/ingest-attachment/build.gradle | 10 +- plugins/transport-nio/build.gradle | 14 +- server/build.gradle | 6 +- x-pack/plugin/ccr/qa/restart/build.gradle | 2 +- x-pack/plugin/security/cli/build.gradle | 26 +- .../sql/qa/security/with-ssl/build.gradle | 14 +- x-pack/qa/full-cluster-restart/build.gradle | 32 +- .../reindex-tests-with-security/build.gradle | 6 +- x-pack/qa/rolling-upgrade/build.gradle | 32 +- 31 files changed, 1022 insertions(+), 649 deletions(-) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/JdkJarHellCheck.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/LazyFileOutputStream.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/LoggedExec.java (100%) rename buildSrc/src/main/{minimumRuntime => 
java}/org/elasticsearch/gradle/Version.java (100%) rename buildSrc/src/main/{minimumRuntime => java}/org/elasticsearch/gradle/VersionProperties.java (100%) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties diff --git a/build.gradle b/build.gradle index 7de02b814da86..1de3f919d9c49 100644 --- a/build.gradle +++ b/build.gradle @@ -31,6 +31,7 @@ import org.gradle.plugins.ide.eclipse.model.SourceFolder plugins { id 'com.gradle.build-scan' version '2.2.1' id 'base' + id 'elasticsearch.global-build-info' } if (Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) { buildScan { @@ -262,7 +263,7 @@ allprojects { } project.afterEvaluate { - configurations.all { + configurations.matching { it.canBeResolved }.all { resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> projectSubstitutions.each { k,v -> subs.substitute(subs.module(k)).with(subs.project(v)) @@ -336,7 +337,7 @@ gradle.projectsEvaluated { if (tasks.findByPath('test') != null && tasks.findByPath('integTest') != null) { integTest.mustRunAfter test } - configurations.all { Configuration configuration -> + configurations.matching { it.canBeResolved }.all { Configuration configuration -> dependencies.all { Dependency dep -> Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { @@ -617,7 +618,3 @@ allprojects { } } } - - - - diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 737bbca4cafb9..f239427330c58 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -69,37 +69,10 @@ processResources { if (JavaVersion.current() < JavaVersion.VERSION_11) { throw new GradleException('At least Java 11 is required to build elasticsearch gradle tools') } -// Gradle 4.10 does not support setting this to 11 yet -targetCompatibility = "10" -sourceCompatibility = "10" - -// We have a few classes that need to be compiled for older java versions because these are used to run checks against -// those -sourceSets { - minimumRuntime { - // We only want Java here, but the Groovy doesn't configure javadoc correctly if we don't define this as groovy - groovy { - srcDirs = ['src/main/minimumRuntime'] - } - } -} -compileMinimumRuntimeGroovy { - targetCompatibility = 8 - sourceCompatibility = 8 -} -dependencies { - if (project.ext.has("isEclipse") == false || project.ext.isEclipse == false) { - // eclipse is confused if this is set explicitly - compile sourceSets.minimumRuntime.output - } - minimumRuntimeCompile "junit:junit:${props.getProperty('junit')}" - minimumRuntimeCompile localGroovy() - minimumRuntimeCompile gradleApi() -} -jar { - from sourceSets.minimumRuntime.output -} +// Keep compatibility with Java 8 for external users of build-tools that haven't migrated to Java 11 +targetCompatibility = '8' +sourceCompatibility = '8' /***************************************************************************** * Dependencies used by the entire build * @@ -164,7 +137,6 @@ if (project != rootProject) { dependenciesInfo.enabled = 
false forbiddenApisMain.enabled = false forbiddenApisTest.enabled = false - forbiddenApisMinimumRuntime.enabled = false jarHell.enabled = false thirdPartyAudit.enabled = false diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 51300ffc628c9..3a058ca9310df 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -19,15 +19,24 @@ package org.elasticsearch.gradle import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar +import groovy.transform.CompileDynamic +import groovy.transform.CompileStatic import org.apache.commons.io.IOUtils -import org.apache.tools.ant.taskdefs.condition.Os import org.eclipse.jgit.lib.Constants import org.eclipse.jgit.lib.RepositoryBuilder +import org.elasticsearch.gradle.info.GlobalBuildInfoPlugin +import org.elasticsearch.gradle.info.GlobalInfoExtension +import org.elasticsearch.gradle.info.JavaHome +import org.elasticsearch.gradle.precommit.DependencyLicensesTask import org.elasticsearch.gradle.precommit.PrecommitTasks import org.elasticsearch.gradle.test.ErrorReportingTestListener +import org.elasticsearch.gradle.testclusters.ElasticsearchCluster +import org.gradle.api.Action import org.gradle.api.GradleException import org.gradle.api.InvalidUserDataException import org.gradle.api.JavaVersion +import org.gradle.api.NamedDomainObjectContainer import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task @@ -41,22 +50,34 @@ import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.artifacts.repositories.ArtifactRepository import org.gradle.api.artifacts.repositories.IvyArtifactRepository +import org.gradle.api.artifacts.repositories.IvyPatternRepositoryLayout import org.gradle.api.artifacts.repositories.MavenArtifactRepository import org.gradle.api.credentials.HttpHeaderCredentials import org.gradle.api.execution.TaskActionListener import org.gradle.api.execution.TaskExecutionGraph +import org.gradle.api.file.CopySpec +import org.gradle.api.plugins.BasePlugin +import org.gradle.api.plugins.BasePluginConvention +import org.gradle.api.plugins.ExtraPropertiesExtension import org.gradle.api.plugins.JavaPlugin +import org.gradle.api.plugins.JavaPluginExtension +import org.gradle.api.publish.PublishingExtension import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.SourceSet +import org.gradle.api.tasks.SourceSetContainer import org.gradle.api.tasks.bundling.Jar import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc import org.gradle.api.tasks.testing.Test +import org.gradle.api.tasks.testing.logging.TestLoggingContainer import org.gradle.authentication.http.HttpHeaderAuthentication +import org.gradle.external.javadoc.CoreJavadocOptions import org.gradle.internal.jvm.Jvm +import org.gradle.language.base.plugins.LifecycleBasePlugin +import org.gradle.process.CommandLineArgumentProvider import org.gradle.process.ExecResult import org.gradle.process.ExecSpec import org.gradle.util.GradleVersion @@ -64,18 +85,19 @@ import org.gradle.util.GradleVersion import 
java.nio.charset.StandardCharsets import java.time.ZoneOffset import java.time.ZonedDateTime -import java.util.concurrent.ExecutorService -import java.util.concurrent.Executors -import java.util.concurrent.Future import java.util.regex.Matcher /** * Encapsulates build configuration for elasticsearch projects. */ +@CompileStatic class BuildPlugin implements Plugin { @Override void apply(Project project) { + // make sure the global build info plugin is applied to the root project + project.rootProject.pluginManager.apply(GlobalBuildInfoPlugin) + if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) { throw new InvalidUserDataException('elasticsearch.standalone-test, ' + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' @@ -105,9 +127,8 @@ class BuildPlugin implements Plugin { project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) setupSeed(project) - globalBuildInfo(project) configureRepositories(project) - project.ext.versions = VersionProperties.versions + project.extensions.getByType(ExtraPropertiesExtension).set('versions', VersionProperties.versions) configureSourceSets(project) configureCompile(project) configureJavadoc(project) @@ -118,175 +139,37 @@ class BuildPlugin implements Plugin { configureDependenciesInfo(project) // Common config when running with a FIPS-140 runtime JVM - // Need to do it here to support external plugins - if (project.ext.inFipsJvm) { - project.tasks.withType(Test) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - project.pluginManager.withPlugin("elasticsearch.testclusters") { - project.testClusters.all { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - } - } - - } - - - - /** Performs checks on the build environment and prints information about the build environment. 
*/ - static void globalBuildInfo(Project project) { - if (project.rootProject.ext.has('buildChecksDone') == false) { - JavaVersion minimumRuntimeVersion = JavaVersion.toVersion( - BuildPlugin.class.getClassLoader().getResourceAsStream("minimumRuntimeVersion").text.trim() - ) - JavaVersion minimumCompilerVersion = JavaVersion.toVersion( - BuildPlugin.class.getClassLoader().getResourceAsStream("minimumCompilerVersion").text.trim() - ) - String compilerJavaHome = findCompilerJavaHome() - String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome) - File gradleJavaHome = Jvm.current().javaHome - - String javaVendor = System.getProperty('java.vendor') - String gradleJavaVersion = System.getProperty('java.version') - String gradleJavaVersionDetails = "${javaVendor} ${gradleJavaVersion}" + - " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]" - - String compilerJavaVersionDetails = gradleJavaVersionDetails - JavaVersion compilerJavaVersionEnum = JavaVersion.current() - if (new File(compilerJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { - compilerJavaVersionDetails = findJavaVersionDetails(project, compilerJavaHome) - compilerJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, compilerJavaHome)) - } - - String runtimeJavaVersionDetails = gradleJavaVersionDetails - JavaVersion runtimeJavaVersionEnum = JavaVersion.current() - if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { - runtimeJavaVersionDetails = findJavaVersionDetails(project, runtimeJavaHome) - runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, runtimeJavaHome)) - } - - boolean inFipsJvm = false - if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { - // We don't expect Gradle to be running in a FIPS JVM - String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));' - inFipsJvm = Boolean.parseBoolean(runJavaAsScript(project, runtimeJavaHome, inFipsJvmScript)) - } - - // Build debugging info - println '=======================================' - println 'Elasticsearch Build Hamster says Hello!' 
- println " Gradle Version : ${project.gradle.gradleVersion}" - println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})" - if (gradleJavaVersionDetails != compilerJavaVersionDetails || gradleJavaVersionDetails != runtimeJavaVersionDetails) { - println " Compiler JDK Version : ${compilerJavaVersionEnum} (${compilerJavaVersionDetails})" - println " Compiler java.home : ${compilerJavaHome}" - println " Runtime JDK Version : ${runtimeJavaVersionEnum} (${runtimeJavaVersionDetails})" - println " Runtime java.home : ${runtimeJavaHome}" - println " Gradle JDK Version : ${JavaVersion.toVersion(gradleJavaVersion)} (${gradleJavaVersionDetails})" - println " Gradle java.home : ${gradleJavaHome}" - } else { - println " JDK Version : ${JavaVersion.toVersion(gradleJavaVersion)} (${gradleJavaVersionDetails})" - println " JAVA_HOME : ${gradleJavaHome}" - } - println " Random Testing Seed : ${project.testSeed}" - println '=======================================' - - // enforce Java version - if (compilerJavaVersionEnum < minimumCompilerVersion) { - final String message = - "the compiler java.home must be set to a JDK installation directory for Java ${minimumCompilerVersion}" + - " but is [${compilerJavaHome}] corresponding to [${compilerJavaVersionEnum}]" - throw new GradleException(message) - } - - if (runtimeJavaVersionEnum < minimumRuntimeVersion) { - final String message = - "the runtime java.home must be set to a JDK installation directory for Java ${minimumRuntimeVersion}" + - " but is [${runtimeJavaHome}] corresponding to [${runtimeJavaVersionEnum}]" - throw new GradleException(message) - } - - final Map javaVersions = [:] - for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) { - if(System.getenv(getJavaHomeEnvVarName(version.toString())) != null) { - javaVersions.put(version, findJavaHome(version.toString())); - } - } - - final int numberOfPhysicalCores = numberOfPhysicalCores(project.rootProject) - if (javaVersions.isEmpty() == false) { - - ExecutorService exec = Executors.newFixedThreadPool(numberOfPhysicalCores) - Set> results = new HashSet<>() - - javaVersions.entrySet().stream() - .filter { it.getValue() != null } - .forEach { javaVersionEntry -> - results.add(exec.submit { - final String javaHome = javaVersionEntry.getValue() - final int version = javaVersionEntry.getKey() - if (project.file(javaHome).exists() == false) { - throw new GradleException("Invalid JAVA${version}_HOME=${javaHome} location does not exist") + // Need to do it here to support external plugins + if (project == project.rootProject) { + GlobalInfoExtension globalInfo = project.extensions.getByType(GlobalInfoExtension) + + // wait until global info is populated because we don't know if we are running in a fips jvm until execution time + globalInfo.ready { + project.subprojects { Project subproject -> + ExtraPropertiesExtension ext = subproject.extensions.getByType(ExtraPropertiesExtension) + // Common config when running with a FIPS-140 runtime JVM + if (ext.has('inFipsJvm') && ext.get('inFipsJvm')) { + subproject.tasks.withType(Test) { Test task -> + task.systemProperty 'javax.net.ssl.trustStorePassword', 'password' + task.systemProperty 'javax.net.ssl.keyStorePassword', 'password' } - - JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome)) - final JavaVersion expectedJavaVersionEnum = version < 9 ? - JavaVersion.toVersion("1." 
+ version) : - JavaVersion.toVersion(Integer.toString(version)) - - if (javaVersionEnum != expectedJavaVersionEnum) { - final String message = - "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" + - " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]" - throw new GradleException(message) + project.pluginManager.withPlugin("elasticsearch.testclusters") { + NamedDomainObjectContainer testClusters = subproject.extensions.getByName('testClusters') as NamedDomainObjectContainer + testClusters.all { ElasticsearchCluster cluster -> + cluster.systemProperty 'javax.net.ssl.trustStorePassword', 'password' + cluster.systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } } - }) - } - - project.gradle.taskGraph.whenReady { - try { - results.forEach { it.get() } - } finally { - exec.shutdown(); } } } - - project.rootProject.ext.compilerJavaHome = compilerJavaHome - project.rootProject.ext.runtimeJavaHome = runtimeJavaHome - project.rootProject.ext.compilerJavaVersion = compilerJavaVersionEnum - project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum - project.rootProject.ext.isRuntimeJavaHomeSet = compilerJavaHome.equals(runtimeJavaHome) == false - project.rootProject.ext.javaVersions = javaVersions - project.rootProject.ext.buildChecksDone = true - project.rootProject.ext.minimumCompilerVersion = minimumCompilerVersion - project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion - project.rootProject.ext.inFipsJvm = inFipsJvm - project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion) - project.rootProject.ext.java9Home = "${-> findJavaHome("9")}" - project.rootProject.ext.defaultParallel = numberOfPhysicalCores } - - project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion - project.sourceCompatibility = project.rootProject.ext.minimumRuntimeVersion - - // set java home for each project, so they dont have to find it in the root project - project.ext.compilerJavaHome = project.rootProject.ext.compilerJavaHome - project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome - project.ext.compilerJavaVersion = project.rootProject.ext.compilerJavaVersion - project.ext.runtimeJavaVersion = project.rootProject.ext.runtimeJavaVersion - project.ext.isRuntimeJavaHomeSet = project.rootProject.ext.isRuntimeJavaHomeSet - project.ext.javaVersions = project.rootProject.ext.javaVersions - project.ext.inFipsJvm = project.rootProject.ext.inFipsJvm - project.ext.gradleJavaVersion = project.rootProject.ext.gradleJavaVersion - project.ext.java9Home = project.rootProject.ext.java9Home } static void requireDocker(final Task task) { final Project rootProject = task.project.rootProject + ExtraPropertiesExtension ext = rootProject.extensions.getByType(ExtraPropertiesExtension) + if (rootProject.hasProperty('requiresDocker') == false) { /* * This is our first time encountering a task that requires Docker. 
We will add an extension that will let us track the tasks @@ -314,11 +197,11 @@ class BuildPlugin implements Plugin { throw new IllegalArgumentException( "expected build.docker to be unset or one of \"true\" or \"false\" but was [" + buildDockerProperty + "]") } - rootProject.rootProject.ext.buildDocker = buildDocker - rootProject.rootProject.ext.requiresDocker = [] + + ext.set('buildDocker', buildDocker) + ext.set('requiresDocker', []) rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> - final List tasks = - ((List)rootProject.requiresDocker).findAll { taskGraph.hasTask(it) }.collect { " ${it.path}".toString()} + final List tasks = taskGraph.allTasks.intersect(ext.get('requiresDocker') as List).collect { " ${it.path}".toString()} if (tasks.isEmpty() == false) { /* * There are tasks in the task graph that require Docker. Now we are failing because either the Docker binary does not @@ -371,8 +254,9 @@ class BuildPlugin implements Plugin { } } } - if (rootProject.buildDocker) { - rootProject.requiresDocker.add(task) + + if (ext.get('buildDocker')) { + (ext.get('requiresDocker') as List).add(task) } else { task.enabled = false } @@ -400,130 +284,48 @@ class BuildPlugin implements Plugin { + "or by passing -Dbuild.docker=false") } - private static String findCompilerJavaHome() { - String compilerJavaHome = System.getenv('JAVA_HOME') - final String compilerJavaProperty = System.getProperty('compiler.java') - if (compilerJavaProperty != null) { - compilerJavaHome = findJavaHome(compilerJavaProperty) - } - if (compilerJavaHome == null) { - // if JAVA_HOME does not set,so we use the JDK that Gradle was run with. - return Jvm.current().javaHome - } - return compilerJavaHome - } - - private static String findJavaHome(String version) { - String versionedVarName = getJavaHomeEnvVarName(version) - String versionedJavaHome = System.getenv(versionedVarName); - if (versionedJavaHome == null) { - throw new GradleException( - "$versionedVarName must be set to build Elasticsearch. " + - "Note that if the variable was just set you might have to run `./gradlew --stop` for " + - "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details." - ) - } - return versionedJavaHome - } - - private static String getJavaHomeEnvVarName(String version) { - return 'JAVA' + version + '_HOME' - } - /** Add a check before gradle execution phase which ensures java home for the given java version is set. 
*/ static void requireJavaHome(Task task, int version) { - Project rootProject = task.project.rootProject // use root project for global accounting + // use root project for global accounting + Project rootProject = task.project.rootProject + ExtraPropertiesExtension ext = rootProject.extensions.getByType(ExtraPropertiesExtension) + if (rootProject.hasProperty('requiredJavaVersions') == false) { - rootProject.rootProject.ext.requiredJavaVersions = [:] - rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> + ext.set('requiredJavaVersions', [:]) + rootProject.gradle.taskGraph.whenReady({ TaskExecutionGraph taskGraph -> List messages = [] - for (entry in rootProject.requiredJavaVersions) { - if (rootProject.javaVersions.get(entry.key) != null) { + Map> requiredJavaVersions = (Map>) ext.get('requiredJavaVersions') + for (Map.Entry> entry : requiredJavaVersions) { + List javaVersions = ext.get('javaVersions') as List + if (javaVersions.find { it.version == entry.key } != null) { continue } - List tasks = entry.value.findAll { taskGraph.hasTask(it) }.collect { " ${it.path}" } + List tasks = entry.value.findAll { taskGraph.hasTask(it) }.collect { " ${it.path}".toString() } if (tasks.isEmpty() == false) { - messages.add("JAVA${entry.key}_HOME required to run tasks:\n${tasks.join('\n')}") + messages.add("JAVA${entry.key}_HOME required to run tasks:\n${tasks.join('\n')}".toString()) } } if (messages.isEmpty() == false) { throw new GradleException(messages.join('\n')) } - rootProject.rootProject.ext.requiredJavaVersions = null // reset to null to indicate the pre-execution checks have executed - } - } else if (rootProject.rootProject.requiredJavaVersions == null) { + ext.set('requiredJavaVersions', null) // reset to null to indicate the pre-execution checks have executed + }) + } else if (ext.has('requiredJavaVersions') == false || ext.get('requiredJavaVersions') == null) { // check directly if the version is present since we are already executing - if (rootProject.javaVersions.get(version) == null) { + List javaVersions = ext.get('javaVersions') as List + if (javaVersions.find { it.version == version } == null) { throw new GradleException("JAVA${version}_HOME required to run task:\n${task}") } } else { - rootProject.requiredJavaVersions.getOrDefault(version, []).add(task) + (ext.get('requiredJavaVersions') as Map>).getOrDefault(version, []).add(task) } } /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ static String getJavaHome(final Task task, final int version) { requireJavaHome(task, version) - return task.project.javaVersions.get(version) - } - - private static String findRuntimeJavaHome(final String compilerJavaHome) { - String runtimeJavaProperty = System.getProperty("runtime.java") - if (runtimeJavaProperty != null) { - return findJavaHome(runtimeJavaProperty) - } - return System.getenv('RUNTIME_JAVA_HOME') ?: compilerJavaHome - } - - /** Finds printable java version of the given JAVA_HOME */ - private static String findJavaVersionDetails(Project project, String javaHome) { - String versionInfoScript = 'print(' + - 'java.lang.System.getProperty("java.vendor") + " " + java.lang.System.getProperty("java.version") + ' + - '" [" + java.lang.System.getProperty("java.vm.name") + " " + java.lang.System.getProperty("java.vm.version") + "]");' - return runJavaAsScript(project, javaHome, versionInfoScript).trim() - } - - /** Finds the parsable java specification version */ - private static String 
findJavaSpecificationVersion(Project project, String javaHome) { - String versionScript = 'print(java.lang.System.getProperty("java.specification.version"));' - return runJavaAsScript(project, javaHome, versionScript) - } - - private static String findJavaVendor(Project project, String javaHome) { - String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));' - return runJavaAsScript(project, javaHome, vendorScript) - } - - /** Finds the parsable java specification version */ - private static String findJavaVersion(Project project, String javaHome) { - String versionScript = 'print(java.lang.System.getProperty("java.version"));' - return runJavaAsScript(project, javaHome, versionScript) - } - - /** Runs the given javascript using jjs from the jdk, and returns the output */ - private static String runJavaAsScript(Project project, String javaHome, String script) { - ByteArrayOutputStream stdout = new ByteArrayOutputStream() - ByteArrayOutputStream stderr = new ByteArrayOutputStream() - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // gradle/groovy does not properly escape the double quote for windows - script = script.replace('"', '\\"') - } - File jrunscriptPath = new File(javaHome, 'bin/jrunscript') - ExecResult result = project.exec { - executable = jrunscriptPath - args '-e', script - standardOutput = stdout - errorOutput = stderr - ignoreExitValue = true - } - if (result.exitValue != 0) { - project.logger.error("STDOUT:") - stdout.toString('UTF-8').eachLine { line -> project.logger.error(line) } - project.logger.error("STDERR:") - stderr.toString('UTF-8').eachLine { line -> project.logger.error(line) } - result.rethrowFailure() - } - return stdout.toString('UTF-8').trim() + List javaVersions = task.project.property('javaVersions') as List + return javaVersions.find { it.version == version }.javaHome.absolutePath } /** Return the configuration name used for finding transitive deps of the given dependency. */ @@ -549,7 +351,7 @@ class BuildPlugin implements Plugin { */ static void configureConfigurations(Project project) { // we want to test compileOnly deps! 
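// Why the typed rewrite below: with @CompileStatic, dynamic property access such as
// project.configurations.testCompile no longer resolves at compile time, so explicit
// configurations.getByName(JavaPlugin.TEST_COMPILE_CONFIGURATION_NAME) lookups are used
// instead. Behavior is unchanged: testCompile still extends compileOnly, keeping
// compile-only (provided-style) dependencies on the test classpath.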
- project.configurations.testCompile.extendsFrom(project.configurations.compileOnly) + project.configurations.getByName(JavaPlugin.TEST_COMPILE_CONFIGURATION_NAME).extendsFrom(project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME)) // we are not shipping these jars, we act like dumb consumers of these things if (project.path.startsWith(':test:fixtures') || project.path == ':build-tools') { @@ -587,9 +389,9 @@ class BuildPlugin implements Plugin { } } - project.configurations.compile.dependencies.all(disableTransitiveDeps) - project.configurations.testCompile.dependencies.all(disableTransitiveDeps) - project.configurations.compileOnly.dependencies.all(disableTransitiveDeps) + project.configurations.getByName(JavaPlugin.COMPILE_CONFIGURATION_NAME).dependencies.all(disableTransitiveDeps) + project.configurations.getByName(JavaPlugin.TEST_COMPILE_CONFIGURATION_NAME).dependencies.all(disableTransitiveDeps) + project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME).dependencies.all(disableTransitiveDeps) project.plugins.withType(ShadowPlugin).whenPluginAdded { Configuration bundle = project.configurations.create('bundle') @@ -603,46 +405,45 @@ class BuildPlugin implements Plugin { if (repository instanceof MavenArtifactRepository) { final MavenArtifactRepository maven = (MavenArtifactRepository) repository assertRepositoryURIUsesHttps(maven, project, maven.getUrl()) - repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(project, uri) } + repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(maven, project, uri) } } else if (repository instanceof IvyArtifactRepository) { final IvyArtifactRepository ivy = (IvyArtifactRepository) repository assertRepositoryURIUsesHttps(ivy, project, ivy.getUrl()) } } RepositoryHandler repos = project.repositories - if (System.getProperty("repos.mavenLocal") != null) { + if (System.getProperty('repos.mavenLocal') != null) { // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is // useful for development ie. 
bwc tests where we install stuff in the local repository // such that we don't have to pass hardcoded files to gradle repos.mavenLocal() } repos.jcenter() - repos.ivy { - name "elasticsearch" - url "https://artifacts.elastic.co/downloads" - patternLayout { - artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]" + repos.ivy { IvyArtifactRepository repo -> + repo.name = 'elasticsearch' + repo.url = 'https://artifacts.elastic.co/downloads' + repo.patternLayout { IvyPatternRepositoryLayout layout -> + layout.artifact 'elasticsearch/[module]-[revision](-[classifier]).[ext]' } // this header is not a credential but we hack the capability to send this header to avoid polluting our download stats - credentials(HttpHeaderCredentials) { - name = "X-Elastic-No-KPI" - value = "1" - } - authentication { - header(HttpHeaderAuthentication) - } + repo.credentials(HttpHeaderCredentials, { HttpHeaderCredentials creds -> + creds.name = 'X-Elastic-No-KPI' + creds.value = '1' + } as Action) + repo.authentication.create('header', HttpHeaderAuthentication) } - repos.maven { - name "elastic" - url "https://artifacts.elastic.co/maven" + repos.maven { MavenArtifactRepository repo -> + repo.name = 'elastic' + repo.url = 'https://artifacts.elastic.co/maven' } String luceneVersion = VersionProperties.lucene if (luceneVersion.contains('-snapshot')) { // extract the revision number from the version with a regex matcher - String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] - repos.maven { - name 'lucene-snapshots' - url "https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}" + List matches = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/).getAt(0) as List + String revision = matches.get(1) + repos.maven { MavenArtifactRepository repo -> + repo.name = 'lucene-snapshots' + repo.url = "https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}" } } } @@ -664,6 +465,7 @@ class BuildPlugin implements Plugin { *
*     <li>Set compile time deps back to compile from runtime (known issue with maven-publish plugin)</li>
    • * */ + @CompileDynamic private static Closure fixupDependencies(Project project) { return { XmlProvider xml -> // first find if we have dependencies at all, and grab the node @@ -724,21 +526,22 @@ class BuildPlugin implements Plugin { } /**Configuration generation of maven poms. */ - public static void configurePomGeneration(Project project) { + static void configurePomGeneration(Project project) { // Only works with `enableFeaturePreview('STABLE_PUBLISHING')` // https://github.com/gradle/gradle/issues/5696#issuecomment-396965185 project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, // just make a copy. - generatePOMTask.ext.pomFileName = null - doLast { - project.copy { - from generatePOMTask.destination - into "${project.buildDir}/distributions" - rename { - generatePOMTask.ext.pomFileName == null ? - "${project.archivesBaseName}-${project.version}.pom" : - generatePOMTask.ext.pomFileName + ExtraPropertiesExtension ext = generatePOMTask.extensions.getByType(ExtraPropertiesExtension) + ext.set('pomFileName', null) + generatePOMTask.doLast { + project.copy { CopySpec spec -> + spec.from generatePOMTask.destination + spec.into "${project.buildDir}/distributions" + spec.rename { + ext.has('pomFileName') && ext.get('pomFileName') == null ? + "${project.convention.getPlugin(BasePluginConvention).archivesBaseName}-${project.version}.pom" : + ext.get('pomFileName') } } } @@ -748,22 +551,16 @@ class BuildPlugin implements Plugin { assemble.dependsOn(generatePOMTask) } } - project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded { - project.publishing { - publications { - all { MavenPublication publication -> // we only deal with maven - // add exclusions to the pom directly, for each of the transitive deps of this project's deps - publication.pom.withXml(fixupDependencies(project)) - } - } + project.plugins.withType(MavenPublishPlugin).whenPluginAdded { + PublishingExtension publishing = project.extensions.getByType(PublishingExtension) + publishing.publications.all { MavenPublication publication -> // we only deal with maven + // add exclusions to the pom directly, for each of the transitive deps of this project's deps + publication.pom.withXml(fixupDependencies(project)) } project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.publishing { - publications { - nebula(MavenPublication) { - artifacts = [ project.tasks.shadowJar ] - } - } + MavenPublication publication = publishing.publications.maybeCreate('nebula', MavenPublication) + publication.with { + artifacts = [ project.tasks.getByName('shadowJar') ] } } } @@ -775,9 +572,9 @@ class BuildPlugin implements Plugin { static void configureSourceSets(Project project) { project.plugins.withType(ShadowPlugin).whenPluginAdded { ['main', 'test'].each {name -> - SourceSet sourceSet = project.sourceSets.findByName(name) + SourceSet sourceSet = project.extensions.getByType(SourceSetContainer).findByName(name) if (sourceSet != null) { - sourceSet.compileClasspath += project.configurations.bundle + sourceSet.compileClasspath += project.configurations.getByName('bundle') } } } @@ -785,17 +582,23 @@ class BuildPlugin implements Plugin { /** Adds compiler settings to the project */ static void configureCompile(Project project) { - project.ext.compactProfile = 'full' + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + ext.set('compactProfile', 'full') + + 
project.extensions.getByType(JavaPluginExtension).sourceCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + project.extensions.getByType(JavaPluginExtension).targetCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + project.afterEvaluate { - project.tasks.withType(JavaCompile) { - final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) - final compilerJavaHomeFile = new File(project.compilerJavaHome) + File compilerJavaHome = ext.get('compilerJavaHome') as File + + project.tasks.withType(JavaCompile) { JavaCompile compileTask -> + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(compileTask.targetCompatibility) // we only fork if the Gradle JDK is not the same as the compiler JDK - if (compilerJavaHomeFile.canonicalPath == Jvm.current().javaHome.canonicalPath) { - options.fork = false + if (compilerJavaHome.canonicalPath == Jvm.current().javaHome.canonicalPath) { + compileTask.options.fork = false } else { - options.fork = true - options.forkOptions.javaHome = compilerJavaHomeFile + compileTask.options.fork = true + compileTask.options.forkOptions.javaHome = compilerJavaHome } /* * -path because gradle will send in paths that don't always exist. @@ -804,29 +607,28 @@ class BuildPlugin implements Plugin { */ // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :) // fail on all javac warnings - options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation,-try' << '-Xdoclint:all' << '-Xdoclint:-missing' + compileTask.options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation,-try' << '-Xdoclint:all' << '-Xdoclint:-missing' // either disable annotation processor completely (default) or allow to enable them if an annotation processor is explicitly defined - if (options.compilerArgs.contains("-processor") == false) { - options.compilerArgs << '-proc:none' + if (compileTask.options.compilerArgs.contains("-processor") == false) { + compileTask.options.compilerArgs << '-proc:none' } - options.encoding = 'UTF-8' - options.incremental = true + compileTask.options.encoding = 'UTF-8' + compileTask.options.incremental = true // TODO: use native Gradle support for --release when available (cf. 
https://github.com/gradle/gradle/issues/2510) - options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion + compileTask.options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion } // also apply release flag to groovy, which is used in build-tools - project.tasks.withType(GroovyCompile) { - final compilerJavaHomeFile = new File(project.compilerJavaHome) + project.tasks.withType(GroovyCompile) { GroovyCompile compileTask -> // we only fork if the Gradle JDK is not the same as the compiler JDK - if (compilerJavaHomeFile.canonicalPath == Jvm.current().javaHome.canonicalPath) { - options.fork = false + if (compilerJavaHome.canonicalPath == Jvm.current().javaHome.canonicalPath) { + compileTask.options.fork = false } else { - options.fork = true - options.forkOptions.javaHome = compilerJavaHomeFile - options.compilerArgs << '--release' << JavaVersion.toVersion(it.targetCompatibility).majorVersion + compileTask.options.fork = true + compileTask.options.forkOptions.javaHome = compilerJavaHome + compileTask.options.compilerArgs << '--release' << JavaVersion.toVersion(compileTask.targetCompatibility).majorVersion } } } @@ -835,11 +637,12 @@ class BuildPlugin implements Plugin { static void configureJavadoc(Project project) { // remove compiled classes from the Javadoc classpath: http://mail.openjdk.java.net/pipermail/javadoc-dev/2018-January/000400.html final List classes = new ArrayList<>() - project.tasks.withType(JavaCompile) { javaCompile -> + project.tasks.withType(JavaCompile) { JavaCompile javaCompile -> classes.add(javaCompile.destinationDir) } - project.tasks.withType(Javadoc) { javadoc -> - javadoc.executable = new File(project.compilerJavaHome, 'bin/javadoc') + project.tasks.withType(Javadoc) { Javadoc javadoc -> + File compilerJavaHome = project.extensions.getByType(ExtraPropertiesExtension).get('compilerJavaHome') as File + javadoc.executable = new File(compilerJavaHome, 'bin/javadoc') javadoc.classpath = javadoc.getClasspath().filter { f -> return classes.contains(f) == false } @@ -847,34 +650,35 @@ class BuildPlugin implements Plugin { * Generate docs using html5 to suppress a warning from `javadoc` * that the default will change to html5 in the future. */ - javadoc.options.addBooleanOption('html5', true) + (javadoc.options as CoreJavadocOptions).addBooleanOption('html5', true) } configureJavadocJar(project) } /** Adds a javadocJar task to generate a jar containing javadocs. */ static void configureJavadocJar(Project project) { - Jar javadocJarTask = project.task('javadocJar', type: Jar) + Jar javadocJarTask = project.tasks.create('javadocJar', Jar) javadocJarTask.classifier = 'javadoc' javadocJarTask.group = 'build' javadocJarTask.description = 'Assembles a jar containing javadocs.' javadocJarTask.from(project.tasks.getByName(JavaPlugin.JAVADOC_TASK_NAME)) - project.assemble.dependsOn(javadocJarTask) + project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn(javadocJarTask) } static void configureSourcesJar(Project project) { - Jar sourcesJarTask = project.task('sourcesJar', type: Jar) + Jar sourcesJarTask = project.tasks.create('sourcesJar', Jar) sourcesJarTask.classifier = 'sources' sourcesJarTask.group = 'build' sourcesJarTask.description = 'Assembles a jar containing source files.' 
- sourcesJarTask.from(project.sourceSets.main.allSource) - project.assemble.dependsOn(sourcesJarTask) + sourcesJarTask.from(project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).allSource) + project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn(sourcesJarTask) } /** Adds additional manifest info to jars */ static void configureJars(Project project) { - project.ext.licenseFile = null - project.ext.noticeFile = null + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + ext.set('licenseFile', null) + ext.set('noticeFile', null) project.tasks.withType(Jar) { Jar jarTask -> // we put all our distributable files under distributions jarTask.destinationDir = new File(project.buildDir, 'distributions') @@ -882,14 +686,15 @@ class BuildPlugin implements Plugin { jarTask.doFirst { // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes + JavaVersion compilerJavaVersion = ext.get('compilerJavaVersion') as JavaVersion jarTask.manifest.attributes( 'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch, 'X-Compile-Lucene-Version': VersionProperties.lucene, 'X-Compile-Elasticsearch-Snapshot': VersionProperties.isElasticsearchSnapshot(), 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), - 'Build-Java-Version': project.compilerJavaVersion) + 'Build-Java-Version': compilerJavaVersion) if (jarTask.manifest.attributes.containsKey('Change') == false) { - logger.warn('Building without git revision id.') + jarTask.logger.warn('Building without git revision id.') jarTask.manifest.attributes('Change': 'Unknown') } else { /* @@ -908,19 +713,24 @@ class BuildPlugin implements Plugin { jarTask.manifest.getAttributes().clear() } } + // add license/notice files project.afterEvaluate { - if (project.licenseFile == null || project.noticeFile == null) { + if (ext.has('licenseFile') == false || ext.get('licenseFile') == null || ext.has('noticeFile') == false || ext.get('noticeFile') == null) { throw new GradleException("Must specify license and notice file for project ${project.path}") } - jarTask.metaInf { - from(project.licenseFile.parent) { - include project.licenseFile.name - rename { 'LICENSE.txt' } + + File licenseFile = ext.get('licenseFile') as File + File noticeFile = ext.get('noticeFile') as File + + jarTask.metaInf { CopySpec spec -> + spec.from(licenseFile.parent) { CopySpec from -> + from.include licenseFile.name + from.rename { 'LICENSE.txt' } } - from(project.noticeFile.parent) { - include project.noticeFile.name - rename { 'NOTICE.txt' } + spec.from(noticeFile.parent) { CopySpec from -> + from.include noticeFile.name + from.rename { 'NOTICE.txt' } } } } @@ -931,35 +741,35 @@ class BuildPlugin implements Plugin { * normal jar with the shadow jar so we no longer want to run * the jar task. */ - project.tasks.jar.enabled = false - project.tasks.shadowJar { + project.tasks.getByName(JavaPlugin.JAR_TASK_NAME).enabled = false + project.tasks.getByName('shadowJar').configure { ShadowJar shadowJar -> /* * Replace the default "shadow" classifier with null * which will leave the classifier off of the file name. */ - classifier = null + shadowJar.classifier = null /* * Not all cases need service files merged but it is * better to be safe */ - mergeServiceFiles() + shadowJar.mergeServiceFiles() /* * Bundle dependencies of the "bundled" configuration. 
*/ - configurations = [project.configurations.bundle] + shadowJar.configurations = [project.configurations.getByName('bundle')] } // Make sure we assemble the shadow jar - project.tasks.assemble.dependsOn project.tasks.shadowJar - project.artifacts { - apiElements project.tasks.shadowJar - } + project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn project.tasks.getByName('shadowJar') + project.artifacts.add('apiElements', project.tasks.getByName('shadowJar')) } } static void configureTestTasks(Project project) { + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + // Default test task should run only unit tests - project.tasks.withType(Test).matching { it.name == 'test' }.all { - include '**/*Tests.class' + project.tasks.withType(Test).matching { Test task -> task.name == 'test' }.all { Test task -> + task.include '**/*Tests.class' } // none of this stuff is applicable to the `:buildSrc` project tests @@ -969,150 +779,127 @@ class BuildPlugin implements Plugin { project.tasks.withType(Test) { Test test -> File testOutputDir = new File(test.reports.junitXml.getDestination(), "output") - doFirst { + ErrorReportingTestListener listener = new ErrorReportingTestListener(test.testLogging, testOutputDir) + test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener) + test.addTestOutputListener(listener) + test.addTestListener(listener) + + /* + * We use lazy-evaluated strings in order to configure system properties whose value will not be known until + * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated + * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due + * to the GStrings containing references to non-serializable objects. + * + * We bypass this by instead passing this system properties vi a CommandLineArgumentProvider. This has the added + * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the + * build cache key or up to date checking. 
+ */ + SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider() + + test.doFirst { project.mkdir(testOutputDir) project.mkdir(heapdumpDir) project.mkdir(test.workingDir) + + if (project.property('inFipsJvm')) { + nonInputProperties.systemProperty('runtime.java', "${-> (ext.get('runtimeJavaVersion') as JavaVersion).getMajorVersion()}FIPS") + } else { + nonInputProperties.systemProperty('runtime.java', "${-> (ext.get('runtimeJavaVersion') as JavaVersion).getMajorVersion()}") + } } - def listener = new ErrorReportingTestListener(test.testLogging, testOutputDir) - test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener) - addTestOutputListener(listener) - addTestListener(listener) + test.jvmArgumentProviders.add(nonInputProperties) + test.extensions.getByType(ExtraPropertiesExtension).set('nonInputProperties', nonInputProperties) - executable = "${project.runtimeJavaHome}/bin/java" - workingDir = project.file("${project.buildDir}/testrun/${test.name}") - maxParallelForks = project.rootProject.ext.defaultParallel + test.executable = "${ext.get('runtimeJavaHome')}/bin/java" + test.workingDir = project.file("${project.buildDir}/testrun/${test.name}") + test.maxParallelForks = project.rootProject.extensions.getByType(ExtraPropertiesExtension).get('defaultParallel') as Integer - exclude '**/*$*.class' + test.exclude '**/*$*.class' - jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}", + test.jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}", "-Xms${System.getProperty('tests.heap.size', '512m')}", '-XX:+HeapDumpOnOutOfMemoryError', - "-XX:HeapDumpPath=$heapdumpDir" + "-XX:HeapDumpPath=$heapdumpDir", + '--illegal-access=warn' - if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) { - jvmArgs '--illegal-access=warn' - } if (System.getProperty('tests.jvm.argline')) { - jvmArgs System.getProperty('tests.jvm.argline').split(" ") + test.jvmArgs System.getProperty('tests.jvm.argline').split(" ") } if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { - jvmArgs '-ea', '-esa' + test.jvmArgs '-ea', '-esa' } // we use './temp' since this is per JVM and tests are forbidden from writing to CWD - systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, + test.systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, 'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar", 'gradle.user.home': project.gradle.getGradleUserHomeDir(), 'java.io.tmpdir': './temp', 'java.awt.headless': 'true', 'tests.gradle': 'true', 'tests.artifact': project.name, - 'tests.task': path, + 'tests.task': test.path, 'tests.security.manager': 'true', - 'tests.seed': project.testSeed, - 'jna.nosys': 'true', - 'compiler.java': project.ext.compilerJavaVersion.getMajorVersion() + 'tests.seed': project.property('testSeed'), + 'jna.nosys': 'true' + + nonInputProperties.systemProperty('compiler.java', "${-> (ext.get('compilerJavaVersion') as JavaVersion).getMajorVersion()}") - if (project.ext.inFipsJvm) { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" - } else { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() - } // TODO: remove setting logging level via system property - systemProperty 'tests.logger.level', 'WARN' + test.systemProperty 'tests.logger.level', 'WARN' System.getProperties().each { key, value -> - 
if ((key.startsWith('tests.') || key.startsWith('es.'))) { - systemProperty key, value + if ((key.toString().startsWith('tests.') || key.toString().startsWith('es.'))) { + test.systemProperty key.toString(), value } } // TODO: remove this once ctx isn't added to update script params in 7.0 - systemProperty 'es.scripting.update.ctx_in_params', 'false' + test.systemProperty 'es.scripting.update.ctx_in_params', 'false' - testLogging { - showExceptions = true - showCauses = true - exceptionFormat = 'full' + test.testLogging { TestLoggingContainer logging -> + logging.showExceptions = true + logging.showCauses = true + logging.exceptionFormat = 'full' } project.plugins.withType(ShadowPlugin).whenPluginAdded { // Test against a shadow jar if we made one - classpath -= project.tasks.compileJava.outputs.files - classpath += project.tasks.shadowJar.outputs.files - - dependsOn project.tasks.shadowJar - } - } - } - } + test.classpath -= project.tasks.getByName('compileJava').outputs.files + test.classpath += project.tasks.getByName('shadowJar').outputs.files - private static int numberOfPhysicalCores(Project project) { - if (project.file("/proc/cpuinfo").exists()) { - // Count physical cores on any Linux distro ( don't count hyper-threading ) - Map socketToCore = [:] - String currentID = "" - project.file("/proc/cpuinfo").readLines().forEach({ line -> - if (line.contains(":")) { - List parts = line.split(":", 2).collect({it.trim()}) - String name = parts[0], value = parts[1] - // the ID of the CPU socket - if (name == "physical id") { - currentID = value - } - // number of cores not including hyper-threading - if (name == "cpu cores") { - assert currentID.isEmpty() == false - socketToCore[currentID] = Integer.valueOf(value) - currentID = "" - } + test.dependsOn project.tasks.getByName('shadowJar') } - }) - return socketToCore.values().sum() - } else if ('Mac OS X'.equals(System.getProperty('os.name'))) { - // Ask macOS to count physical CPUs for us - ByteArrayOutputStream stdout = new ByteArrayOutputStream() - project.exec { - executable 'sysctl' - args '-n', 'hw.physicalcpu' - standardOutput = stdout } - return Integer.parseInt(stdout.toString('UTF-8').trim()) - } else { - // guess that it is half the number of processors (which is wrong on systems that do not have simultaneous multi-threading) - // TODO: implement this on Windows - return Runtime.getRuntime().availableProcessors() / 2 } } private static configurePrecommit(Project project) { Task precommit = PrecommitTasks.create(project, true) - project.check.dependsOn(precommit) - project.test.mustRunAfter(precommit) + project.tasks.getByName(LifecycleBasePlugin.CHECK_TASK_NAME).dependsOn(precommit) + project.tasks.getByName(JavaPlugin.TEST_TASK_NAME).mustRunAfter(precommit) // only require dependency licenses for non-elasticsearch deps - project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection { - it.group.startsWith('org.elasticsearch') == false - } - project.configurations.compileOnly + (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).dependencies = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME).fileCollection { Dependency dependency -> + dependency.group.startsWith('org.elasticsearch') == false + } - project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME) project.plugins.withType(ShadowPlugin).whenPluginAdded { - project.dependencyLicenses.dependencies += project.configurations.bundle.fileCollection { - it.group.startsWith('org.elasticsearch') 
== false + (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).dependencies += project.configurations.getByName('bundle').fileCollection { Dependency dependency -> + dependency.group.startsWith('org.elasticsearch') == false } } } private static configureDependenciesInfo(Project project) { - Task deps = project.tasks.create("dependenciesInfo", DependenciesInfoTask.class) - deps.runtimeConfiguration = project.configurations.runtime + DependenciesInfoTask deps = project.tasks.create("dependenciesInfo", DependenciesInfoTask) + deps.runtimeConfiguration = project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME) project.plugins.withType(ShadowPlugin).whenPluginAdded { deps.runtimeConfiguration = project.configurations.create('infoDeps') - deps.runtimeConfiguration.extendsFrom(project.configurations.runtime, project.configurations.bundle) + deps.runtimeConfiguration.extendsFrom(project.configurations.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME), project.configurations.getByName('bundle')) } - deps.compileOnlyConfiguration = project.configurations.compileOnly + deps.compileOnlyConfiguration = project.configurations.getByName(JavaPlugin.COMPILE_ONLY_CONFIGURATION_NAME) project.afterEvaluate { - deps.mappings = project.dependencyLicenses.mappings + deps.mappings = (project.tasks.getByName('dependencyLicenses') as DependencyLicensesTask).mappings } } @@ -1124,11 +911,12 @@ class BuildPlugin implements Plugin { * the reproduction line from one run be useful on another run. */ static String setupSeed(Project project) { - if (project.rootProject.ext.has('testSeed')) { + ExtraPropertiesExtension ext = project.rootProject.extensions.getByType(ExtraPropertiesExtension) + if (ext.has('testSeed')) { /* Skip this if we've already pinned the testSeed. It is important * that this checks the rootProject so that we know we've only ever * initialized one time. 
*/ - return project.rootProject.ext.testSeed + return ext.get('testSeed') } String testSeed = System.getProperty('tests.seed') @@ -1137,7 +925,7 @@ class BuildPlugin implements Plugin { testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT) } - project.rootProject.ext.testSeed = testSeed + ext.set('testSeed', testSeed) return testSeed } @@ -1169,4 +957,19 @@ class BuildPlugin implements Plugin { }) } } + + private static class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider { + private final Map systemProperties = [:] + + void systemProperty(String key, Object value) { + systemProperties.put(key, value) + } + + @Override + Iterable asArguments() { + return systemProperties.collect { key, value -> + "-D${key}=${value.toString()}".toString() + } + } + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index d5bdd2117023c..e04d0966c412d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -28,6 +28,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask import org.elasticsearch.gradle.testclusters.TestClustersPlugin import org.gradle.api.InvalidUserDataException +import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.publish.maven.MavenPublication @@ -43,13 +44,13 @@ import java.util.regex.Pattern /** * Encapsulates build configuration for an Elasticsearch plugin. */ -class PluginBuildPlugin extends BuildPlugin { +class PluginBuildPlugin implements Plugin { public static final String PLUGIN_EXTENSION_NAME = 'esplugin' @Override void apply(Project project) { - super.apply(project) + project.pluginManager.apply(BuildPlugin) PluginPropertiesExtension extension = project.extensions.create(PLUGIN_EXTENSION_NAME, PluginPropertiesExtension, project) configureDependencies(project) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index e14a8f97ba81d..e5e4f021507f9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -118,15 +118,13 @@ class PrecommitTasks { } private static Task configureThirdPartyAudit(Project project) { - ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') - thirdPartyAuditTask.configure { - dependsOn(buildResources) - signatureFile = buildResources.copy("forbidden/third-party-audit.txt") - javaHome = project.runtimeJavaHome - targetCompatibility = project.runtimeJavaVersion + return project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) { task -> + task.dependsOn(buildResources) + task.signatureFile = buildResources.copy("forbidden/third-party-audit.txt") + task.javaHome = project.runtimeJavaHome + task.targetCompatibility.set(project.provider({ project.runtimeJavaVersion })) } - return thirdPartyAuditTask } private static Task configureForbiddenApisCli(Project project) { @@ -134,15 +132,18 @@ class PrecommitTasks { ExportElasticsearchBuildResourcesTask 
buildResources = project.tasks.getByName('buildResources') project.tasks.withType(CheckForbiddenApis) { dependsOn(buildResources) - targetCompatibility = project.runtimeJavaVersion.getMajorVersion() - if (project.runtimeJavaVersion > JavaVersion.VERSION_11) { - doLast { - project.logger.info( - "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", - project.runtimeJavaVersion - ) + doFirst { + // we need to defer this configuration since we don't know the runtime java version until execution time + targetCompatibility = project.runtimeJavaVersion.getMajorVersion() + if (project.runtimeJavaVersion > JavaVersion.VERSION_11) { + doLast { + project.logger.info( + "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", + project.runtimeJavaVersion + ) + } + targetCompatibility = JavaVersion.VERSION_11.getMajorVersion() } - targetCompatibility = JavaVersion.VERSION_11.getMajorVersion() } bundledSignatures = [ "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index c0bf2a5dccee5..bc5c7ff0871bb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -300,12 +300,6 @@ class ClusterFormationTasks { // its run after plugins have been installed, as the extra config files may belong to plugins setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) - // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected - if (project.inFipsJvm){ - node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') - node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') - } - // extra setup commands for (Map.Entry command : node.config.setupCommands.entrySet()) { // the first argument is the actual script name, relative to home @@ -402,16 +396,17 @@ class ClusterFormationTasks { if (node.nodeVersion.major >= 7) { esConfig['indices.breaker.total.use_real_memory'] = false } - for (Map.Entry setting : node.config.settings) { - if (setting.value == null) { - esConfig.remove(setting.key) - } else { - esConfig.put(setting.key, setting.value) - } - } Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) writeConfig.doFirst { + for (Map.Entry setting : node.config.settings) { + if (setting.value == null) { + esConfig.remove(setting.key) + } else { + esConfig.put(setting.key, setting.value) + } + } + esConfig = configFilter.call(esConfig) File configFile = new File(node.pathConf, 'elasticsearch.yml') logger.info("Configuring ${configFile}") @@ -732,6 +727,12 @@ class ClusterFormationTasks { } start.doLast(elasticsearchRunner) start.doFirst { + // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected + if (project.inFipsJvm){ + node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') + node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') + } + // Configure ES JAVA OPTS - adds system properties, assertion flags, remote debug etc List esJavaOpts = [node.env.get('ES_JAVA_OPTS', '')] String collectedSystemProperties = node.config.systemProperties.collect { key, value -> 
"-D${key}=${value}" }.join(" ") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 52c498aa98d79..40cefdcc25fb9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -86,52 +86,25 @@ class RestIntegTestTask extends DefaultTask { runner.include('**/*IT.class') runner.systemProperty('tests.rest.load_packaged', 'false') - /* - * We use lazy-evaluated strings in order to configure system properties whose value will not be known until - * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated - * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due - * to the GStrings containing references to non-serializable objects. - * - * We bypass this by instead passing this system properties vi a CommandLineArgumentProvider. This has the added - * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the - * build cache key or up to date checking. - */ - def nonInputProperties = new CommandLineArgumentProvider() { - private final Map systemProperties = [:] - - void systemProperty(String key, Object value) { - systemProperties.put(key, value) - } - - @Override - Iterable asArguments() { - return systemProperties.collect { key, value -> - "-D${key}=${value.toString()}".toString() - } - } - } - runner.jvmArgumentProviders.add(nonInputProperties) - runner.ext.nonInputProperties = nonInputProperties - if (System.getProperty("tests.rest.cluster") == null) { if (System.getProperty("tests.cluster") != null || System.getProperty("tests.clustername") != null) { throw new IllegalArgumentException("tests.rest.cluster, tests.cluster, and tests.clustername must all be null or non-null") } if (usesTestclusters == true) { ElasticsearchCluster cluster = project.testClusters."${name}" - nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") - nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") - nonInputProperties.systemProperty('tests.clustername', "${-> cluster.getName() }") + runner.nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",") }") + runner.nonInputProperties.systemProperty('tests.cluster', "${-> cluster.transportPortURI }") + runner.nonInputProperties.systemProperty('tests.clustername', "${-> cluster.getName() }") } else { // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node - nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") - nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") + runner.nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") + runner.nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. 
Until then, we pass // both as separate sysprops - nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") - nonInputProperties.systemProperty('tests.clustername', "${-> nodes[0].clusterName}") + runner.nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + runner.nonInputProperties.systemProperty('tests.clustername', "${-> nodes[0].clusterName}") // dump errors and warnings from cluster log on failure TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 2a858206ebd72..6d895abaa97c7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -27,11 +27,14 @@ import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.InvalidUserDataException +import org.gradle.api.JavaVersion import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.artifacts.Configuration +import org.gradle.api.plugins.ExtraPropertiesExtension import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.JavaPlugin +import org.gradle.api.plugins.JavaPluginExtension import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.SourceSetContainer import org.gradle.api.tasks.compile.JavaCompile @@ -57,10 +60,13 @@ class StandaloneRestTestPlugin implements Plugin { project.pluginManager.apply(JavaBasePlugin) project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) - BuildPlugin.globalBuildInfo(project) BuildPlugin.configureRepositories(project) BuildPlugin.configureTestTasks(project) + ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) + project.extensions.getByType(JavaPluginExtension).sourceCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + project.extensions.getByType(JavaPluginExtension).targetCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion + // only setup tests to build SourceSetContainer sourceSets = project.extensions.getByType(SourceSetContainer) SourceSet testSourceSet = sourceSets.create('test') diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/JdkJarHellCheck.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/JdkJarHellCheck.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/JdkJarHellCheck.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LazyFileOutputStream.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java similarity index 100% rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/LoggedExec.java rename to buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java diff --git 
a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/Version.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java
similarity index 100%
rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/Version.java
rename to buildSrc/src/main/java/org/elasticsearch/gradle/Version.java
diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
similarity index 100%
rename from buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java
rename to buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java
new file mode 100644
index 0000000000000..8537775ee129b
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java
@@ -0,0 +1,276 @@
+package org.elasticsearch.gradle.info;
+
+import org.elasticsearch.gradle.OS;
+import org.gradle.api.DefaultTask;
+import org.gradle.api.GradleException;
+import org.gradle.api.JavaVersion;
+import org.gradle.api.file.RegularFileProperty;
+import org.gradle.api.model.ObjectFactory;
+import org.gradle.api.tasks.CacheableTask;
+import org.gradle.api.tasks.Input;
+import org.gradle.api.tasks.InputDirectory;
+import org.gradle.api.tasks.Nested;
+import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
+import org.gradle.api.tasks.TaskAction;
+import org.gradle.internal.jvm.Jvm;
+import org.gradle.process.ExecResult;
+
+import javax.inject.Inject;
+import java.io.BufferedWriter;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.io.Writer;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+@CacheableTask
+public class GenerateGlobalBuildInfoTask extends DefaultTask {
+    private JavaVersion minimumCompilerVersion;
+    private JavaVersion minimumRuntimeVersion;
+    private File compilerJavaHome;
+    private File runtimeJavaHome;
+    private List<JavaHome> javaVersions;
+    private final RegularFileProperty outputFile;
+    private final RegularFileProperty compilerVersionFile;
+    private final RegularFileProperty runtimeVersionFile;
+    private final RegularFileProperty fipsJvmFile;
+
+    @Inject
+    public GenerateGlobalBuildInfoTask(ObjectFactory objectFactory) {
+        this.outputFile = objectFactory.fileProperty();
+        this.compilerVersionFile = objectFactory.fileProperty();
+        this.runtimeVersionFile = objectFactory.fileProperty();
+        this.fipsJvmFile = objectFactory.fileProperty();
+    }
+
+    @Input
+    public JavaVersion getMinimumCompilerVersion() {
+        return minimumCompilerVersion;
+    }
+
+    public void setMinimumCompilerVersion(JavaVersion minimumCompilerVersion) {
+        this.minimumCompilerVersion = minimumCompilerVersion;
+    }
+
+    @Input
+    public JavaVersion getMinimumRuntimeVersion() {
+        return minimumRuntimeVersion;
+    }
+
+    public void setMinimumRuntimeVersion(JavaVersion minimumRuntimeVersion) {
+        this.minimumRuntimeVersion = minimumRuntimeVersion;
+    }
+
+    @InputDirectory
+    @PathSensitive(PathSensitivity.RELATIVE)
+    public File getCompilerJavaHome() {
+        return compilerJavaHome;
+    }
+
+    public void setCompilerJavaHome(File compilerJavaHome) {
+        this.compilerJavaHome = compilerJavaHome;
+    }
+
+    @InputDirectory
+    @PathSensitive(PathSensitivity.RELATIVE)
+    public File getRuntimeJavaHome() {
+        return runtimeJavaHome;
+    }
+
+    public void setRuntimeJavaHome(File runtimeJavaHome) {
+        this.runtimeJavaHome = runtimeJavaHome;
+    }
+
+    @Nested
+    public List<JavaHome> getJavaVersions() {
+        return javaVersions;
+    }
+
+    public void setJavaVersions(List<JavaHome> javaVersions) {
+        this.javaVersions = javaVersions;
+    }
+
+    @OutputFile
+    public RegularFileProperty getOutputFile() {
+        return outputFile;
+    }
+
+    @OutputFile
+    public RegularFileProperty getCompilerVersionFile() {
+        return compilerVersionFile;
+    }
+
+    @OutputFile
+    public RegularFileProperty getRuntimeVersionFile() {
+        return runtimeVersionFile;
+    }
+
+    @OutputFile
+    public RegularFileProperty getFipsJvmFile() {
+        return fipsJvmFile;
+    }
+
+    @TaskAction
+    public void generate() {
+        String javaVendor = System.getProperty("java.vendor");
+        String gradleJavaVersion = System.getProperty("java.version");
+        String gradleJavaVersionDetails = javaVendor + " " + gradleJavaVersion + " [" + System.getProperty("java.vm.name")
+            + " " + System.getProperty("java.vm.version") + "]";
+
+        String compilerJavaVersionDetails = gradleJavaVersionDetails;
+        JavaVersion compilerJavaVersionEnum = JavaVersion.current();
+        String runtimeJavaVersionDetails = gradleJavaVersionDetails;
+        JavaVersion runtimeJavaVersionEnum = JavaVersion.current();
+        File gradleJavaHome = Jvm.current().getJavaHome();
+        boolean inFipsJvm = false;
+
+        try {
+            if (Files.isSameFile(compilerJavaHome.toPath(), gradleJavaHome.toPath()) == false) {
+                if (compilerJavaHome.exists()) {
+                    compilerJavaVersionDetails = findJavaVersionDetails(compilerJavaHome);
+                    compilerJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(compilerJavaHome));
+                } else {
+                    throw new RuntimeException("Compiler Java home path of '" + compilerJavaHome + "' does not exist");
+                }
+            }
+
+            if (Files.isSameFile(runtimeJavaHome.toPath(), gradleJavaHome.toPath()) == false) {
+                if (runtimeJavaHome.exists()) {
+                    runtimeJavaVersionDetails = findJavaVersionDetails(runtimeJavaHome);
+                    runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(runtimeJavaHome));
+
+                    // We don't expect Gradle to be running in a FIPS JVM
+                    String inFipsJvmScript = "print(java.security.Security.getProviders()[0].name.toLowerCase().contains(\"fips\"));";
+                    inFipsJvm = Boolean.parseBoolean(runJavaAsScript(runtimeJavaHome, inFipsJvmScript));
+                } else {
+                    throw new RuntimeException("Runtime Java home path of '" + runtimeJavaHome + "' does not exist");
+                }
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+
+        try (BufferedWriter writer = new BufferedWriter(new FileWriter(outputFile.getAsFile().get()))) {
+            writer.write("  Gradle Version       : " + getProject().getGradle().getGradleVersion() + "\n");
+            writer.write("  OS Info              : " + System.getProperty("os.name") + " " + System.getProperty("os.version")
+                + " (" + System.getProperty("os.arch") + ")\n");
+            if (gradleJavaVersionDetails.equals(compilerJavaVersionDetails) == false
+                || gradleJavaVersionDetails.equals(runtimeJavaVersionDetails) == false) {
+                writer.write("  Compiler JDK Version : " + compilerJavaVersionEnum + " (" + compilerJavaVersionDetails + ")\n");
+                writer.write("  Compiler java.home   : " + compilerJavaHome + "\n");
+                writer.write("  Runtime JDK Version  : " + runtimeJavaVersionEnum + " (" + runtimeJavaVersionDetails + ")\n");
+                writer.write("  Runtime java.home    : " + runtimeJavaHome + "\n");
+                writer.write("  Gradle JDK Version   : " + JavaVersion.toVersion(gradleJavaVersion)
+                    + " (" + gradleJavaVersionDetails + ")\n");
+                writer.write("  Gradle java.home     : " + gradleJavaHome);
+            } else {
+                writer.write("  JDK Version          : " + JavaVersion.toVersion(gradleJavaVersion)
+                    + " (" + gradleJavaVersionDetails + ")\n");
+                writer.write("  JAVA_HOME            : " + gradleJavaHome);
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+
+        // enforce Java version
+        if (compilerJavaVersionEnum.compareTo(minimumCompilerVersion) < 0) {
+            String message = "The compiler java.home must be set to a JDK installation directory for Java " + minimumCompilerVersion
+                + " but is [" + compilerJavaHome + "] corresponding to [" + compilerJavaVersionEnum + "]";
+            throw new GradleException(message);
+        }
+
+        if (runtimeJavaVersionEnum.compareTo(minimumRuntimeVersion) < 0) {
+            String message = "The runtime java.home must be set to a JDK installation directory for Java " + minimumRuntimeVersion
+                + " but is [" + runtimeJavaHome + "] corresponding to [" + runtimeJavaVersionEnum + "]";
+            throw new GradleException(message);
+        }
+
+        for (JavaHome javaVersion : javaVersions) {
+            File javaHome = javaVersion.getJavaHome();
+            if (javaHome == null) {
+                continue;
+            }
+            JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(javaHome));
+            JavaVersion expectedJavaVersionEnum;
+            int version = javaVersion.getVersion();
+            if (version < 9) {
+                expectedJavaVersionEnum = JavaVersion.toVersion("1." + version);
+            } else {
+                expectedJavaVersionEnum = JavaVersion.toVersion(Integer.toString(version));
+            }
+            if (javaVersionEnum != expectedJavaVersionEnum) {
+                String message = "The environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java "
+                    + expectedJavaVersionEnum + " but is [" + javaHome + "] corresponding to [" + javaVersionEnum + "]";
+                throw new GradleException(message);
+            }
+        }
+
+        writeToFile(compilerVersionFile.getAsFile().get(), compilerJavaVersionEnum.name());
+        writeToFile(runtimeVersionFile.getAsFile().get(), runtimeJavaVersionEnum.name());
+        writeToFile(fipsJvmFile.getAsFile().get(), Boolean.toString(inFipsJvm));
+    }
+
+    private void writeToFile(File file, String content) {
+        try (Writer writer = new FileWriter(file)) {
+            writer.write(content);
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+    }
+
+    /**
+     * Finds printable java version of the given JAVA_HOME
+     */
+    private String findJavaVersionDetails(File javaHome) {
+        String versionInfoScript = "print("
+            + "java.lang.System.getProperty(\"java.vendor\") + \" \" + java.lang.System.getProperty(\"java.version\") + "
+            + "\" [\" + java.lang.System.getProperty(\"java.vm.name\") + \" \" + java.lang.System.getProperty(\"java.vm.version\") + \"]\");";
+        return runJavaAsScript(javaHome, versionInfoScript).trim();
+    }
+
+    /**
+     * Finds the parsable java specification version
+     */
+    private String findJavaSpecificationVersion(File javaHome) {
+        String versionScript = "print(java.lang.System.getProperty(\"java.specification.version\"));";
+        return runJavaAsScript(javaHome, versionScript);
+    }
+
+    /**
+     * Runs the given javascript using jrunscript from the JDK, and returns the output
+     */
+    private String runJavaAsScript(File javaHome, String script) {
+        ByteArrayOutputStream stdout = new ByteArrayOutputStream();
+        ByteArrayOutputStream stderr = new ByteArrayOutputStream();
+        if (OS.current() == OS.WINDOWS) {
+            // gradle/groovy does not properly escape the double quote for windows
+            script = script.replace("\"", "\\\"");
+        }
+        File jrunscriptPath = new File(javaHome, "bin/jrunscript");
+        String finalScript = script;
+        ExecResult result = getProject().exec(spec -> {
+            spec.setExecutable(jrunscriptPath);
+            spec.args("-e", finalScript);
+            spec.setStandardOutput(stdout);
+            spec.setErrorOutput(stderr);
+            spec.setIgnoreExitValue(true);
+        });
+
+        if (result.getExitValue() != 0) {
+            getLogger().error("STDOUT:");
+            Arrays.stream(stdout.toString(UTF_8).split(System.getProperty("line.separator"))).forEach(getLogger()::error);
+            getLogger().error("STDERR:");
+            Arrays.stream(stderr.toString(UTF_8).split(System.getProperty("line.separator"))).forEach(getLogger()::error);
+            result.rethrowFailure();
+        }
+        return stdout.toString(UTF_8).trim();
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java
new file mode 100644
index 0000000000000..f0f34e84261c6
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java
@@ -0,0 +1,198 @@
+package org.elasticsearch.gradle.info;
+
+import org.elasticsearch.gradle.OS;
+import org.gradle.api.GradleException;
+import org.gradle.api.JavaVersion;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.plugins.ExtraPropertiesExtension;
+import org.gradle.internal.jvm.Jvm;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class GlobalBuildInfoPlugin implements Plugin<Project> {
+    private static final String GLOBAL_INFO_EXTENSION_NAME = "globalInfo";
+    private static Integer _defaultParallel = null;
+
+    @Override
+    public void apply(Project project) {
+        if (project != project.getRootProject()) {
+            throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project.");
+        }
+
+        GlobalInfoExtension extension = project.getExtensions().create(GLOBAL_INFO_EXTENSION_NAME, GlobalInfoExtension.class);
+
+        JavaVersion minimumCompilerVersion = JavaVersion.toVersion(getResourceContents("/minimumCompilerVersion"));
+        JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion"));
+
+        File compilerJavaHome = findCompilerJavaHome();
+        File runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome);
+
+        final List<JavaHome> javaVersions = new ArrayList<>();
+        for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.getMajorVersion()); version++) {
+            if (System.getenv(getJavaHomeEnvVarName(Integer.toString(version))) != null) {
+                javaVersions.add(JavaHome.of(version, new File(findJavaHome(Integer.toString(version)))));
+            }
+        }
+
+        GenerateGlobalBuildInfoTask generateTask = project.getTasks().create("generateGlobalBuildInfo",
+            GenerateGlobalBuildInfoTask.class, task -> {
+                task.setJavaVersions(javaVersions);
+                task.setMinimumCompilerVersion(minimumCompilerVersion);
+                task.setMinimumRuntimeVersion(minimumRuntimeVersion);
+                task.setCompilerJavaHome(compilerJavaHome);
+                task.setRuntimeJavaHome(runtimeJavaHome);
+                task.getOutputFile().set(new File(project.getBuildDir(), "global-build-info"));
+                task.getCompilerVersionFile().set(new File(project.getBuildDir(), "java-compiler-version"));
+                task.getRuntimeVersionFile().set(new File(project.getBuildDir(), "java-runtime-version"));
+                task.getFipsJvmFile().set(new File(project.getBuildDir(), "in-fips-jvm"));
+            });
+
+        PrintGlobalBuildInfoTask printTask = project.getTasks().create("printGlobalBuildInfo", PrintGlobalBuildInfoTask.class, task -> {
+            task.getBuildInfoFile().set(generateTask.getOutputFile());
+            task.getCompilerVersionFile().set(generateTask.getCompilerVersionFile());
+            task.getRuntimeVersionFile().set(generateTask.getRuntimeVersionFile());
+            task.getFipsJvmFile().set(generateTask.getFipsJvmFile());
+            task.setGlobalInfoListeners(extension.listeners);
+        });
+
+        project.getExtensions().getByType(ExtraPropertiesExtension.class).set("defaultParallel", findDefaultParallel(project));
+
+        project.allprojects(p -> {
+            // Make sure that any task execution generates and prints build info
+            p.getTasks().all(task -> {
+                if (task != generateTask && task != printTask) {
+                    task.dependsOn(printTask);
+                }
+            });
+
+            ExtraPropertiesExtension ext = p.getExtensions().getByType(ExtraPropertiesExtension.class);
+
+            ext.set("compilerJavaHome", compilerJavaHome);
+            ext.set("runtimeJavaHome", runtimeJavaHome);
+            ext.set("isRuntimeJavaHomeSet", compilerJavaHome.equals(runtimeJavaHome) == false);
+            ext.set("javaVersions", javaVersions);
+            ext.set("minimumCompilerVersion", minimumCompilerVersion);
+            ext.set("minimumRuntimeVersion", minimumRuntimeVersion);
+            ext.set("gradleJavaVersion", Jvm.current().getJavaVersion());
+        });
+    }
+
+    private static File findCompilerJavaHome() {
+        String compilerJavaHome = System.getenv("JAVA_HOME");
+        String compilerJavaProperty = System.getProperty("compiler.java");
+
+        if (compilerJavaProperty != null) {
+            compilerJavaHome = findJavaHome(compilerJavaProperty);
+        }
+
+        // if JAVA_HOME is not set, use the JDK that Gradle was run with
+        return compilerJavaHome == null ? Jvm.current().getJavaHome() : new File(compilerJavaHome);
+    }
+
+    private static File findRuntimeJavaHome(final File compilerJavaHome) {
+        String runtimeJavaProperty = System.getProperty("runtime.java");
+
+        if (runtimeJavaProperty != null) {
+            return new File(findJavaHome(runtimeJavaProperty));
+        }
+
+        return System.getenv("RUNTIME_JAVA_HOME") == null ? compilerJavaHome : new File(System.getenv("RUNTIME_JAVA_HOME"));
+    }
+
+    private static String findJavaHome(String version) {
+        String versionedJavaHome = System.getenv(getJavaHomeEnvVarName(version));
+        if (versionedJavaHome == null) {
+            throw new GradleException(
+                getJavaHomeEnvVarName(version) + " must be set to build Elasticsearch. "
+                    + "Note that if the variable was just set you might have to run `./gradlew --stop` for "
+                    + "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 for details."
+            );
+        }
+        return versionedJavaHome;
+    }
+
+    private static String getJavaHomeEnvVarName(String version) {
+        return "JAVA" + version + "_HOME";
+    }
+
+    private static String getResourceContents(String resourcePath) {
+        try (BufferedReader reader = new BufferedReader(
+            new InputStreamReader(GlobalBuildInfoPlugin.class.getResourceAsStream(resourcePath))
+        )) {
+            StringBuilder b = new StringBuilder();
+            for (String line = reader.readLine(); line != null; line = reader.readLine()) {
+                if (b.length() != 0) {
+                    b.append('\n');
+                }
+                b.append(line);
+            }
+
+            return b.toString();
+        } catch (IOException e) {
+            throw new UncheckedIOException("Error trying to read classpath resource: " + resourcePath, e);
+        }
+    }
+
+    private static int findDefaultParallel(Project project) {
+        // Since it costs IO to compute this, and is done at configuration time we want to cache this if possible
+        // It's safe to store this in a static variable since it's just a primitive so leaking memory isn't an issue
+        if (_defaultParallel == null) {
+            File cpuInfoFile = new File("/proc/cpuinfo");
+            if (cpuInfoFile.exists()) {
+                // Count physical cores on any Linux distro ( don't count hyper-threading )
+                Map<String, Integer> socketToCore = new HashMap<>();
+                String currentID = "";
+
+                try (BufferedReader reader = new BufferedReader(new FileReader(cpuInfoFile))) {
+                    for (String line = reader.readLine(); line != null; line = reader.readLine()) {
+                        if (line.contains(":")) {
+                            List<String> parts = Arrays.stream(line.split(":", 2)).map(String::trim).collect(Collectors.toList());
+                            String name = parts.get(0);
+                            String value = parts.get(1);
+                            // the ID of the CPU socket
+                            if (name.equals("physical id")) {
+                                currentID = value;
+                            }
+                            // Number of cores not including hyper-threading
+                            if (name.equals("cpu cores")) {
+                                assert currentID.isEmpty() == false;
+                                socketToCore.put(currentID, Integer.valueOf(value));
+                                currentID = "";
+                            }
+                        }
+                    }
+                } catch (IOException e) {
+                    throw new UncheckedIOException(e);
+                }
+                _defaultParallel = socketToCore.values().stream().mapToInt(i -> i).sum();
+            } else if (OS.current() == OS.MAC) {
+                // Ask macOS to count physical CPUs for us
+                ByteArrayOutputStream stdout = new ByteArrayOutputStream();
+                project.exec(spec -> {
+                    spec.setExecutable("sysctl");
+                    spec.args("-n", "hw.physicalcpu");
+                    spec.setStandardOutput(stdout);
+                });
+
+                _defaultParallel = Integer.parseInt(stdout.toString().trim());
+            } else {
+                // guess that it is half the number of processors (which is wrong on systems
+                // that do not have simultaneous multi-threading) TODO: implement this on Windows
+                _defaultParallel = Runtime.getRuntime().availableProcessors() / 2;
+            }
+        }
+
+        return _defaultParallel;
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java
new file mode 100644
index 0000000000000..a2daa4a5767c0
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalInfoExtension.java
@@ -0,0 +1,12 @@
+package org.elasticsearch.gradle.info;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class GlobalInfoExtension {
+    final List<Runnable> listeners = new ArrayList<>();
+
+    public void ready(Runnable block) {
+        listeners.add(block);
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java
new file mode 100644
index 0000000000000..29ca2bafc79dc
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/JavaHome.java
@@ -0,0 +1,35 @@
+package org.elasticsearch.gradle.info;
+
+import org.gradle.api.tasks.Input;
+import org.gradle.api.tasks.InputDirectory;
+import org.gradle.api.tasks.Optional;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
+
+import java.io.File;
+
+public class JavaHome {
+    private Integer version;
+    private File javaHome;
+
+    private JavaHome(int version, File javaHome) {
+        this.version = version;
+        this.javaHome = javaHome;
+    }
+
+    public static JavaHome of(int version, File javaHome) {
+        return new JavaHome(version, javaHome);
+    }
+
+    @Input
+    public Integer getVersion() {
+        return version;
+    }
+
+    @InputDirectory
+    @Optional
+    @PathSensitive(PathSensitivity.RELATIVE)
+    public File getJavaHome() {
+        return javaHome;
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java
new file mode 100644
index 0000000000000..b83fe29b073a6
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/PrintGlobalBuildInfoTask.java
@@ -0,0 +1,84 @@
+package org.elasticsearch.gradle.info;
+
+import org.gradle.api.DefaultTask;
+import org.gradle.api.JavaVersion;
+import org.gradle.api.file.RegularFileProperty;
+import org.gradle.api.model.ObjectFactory;
+import org.gradle.api.plugins.ExtraPropertiesExtension;
+import org.gradle.api.resources.TextResource;
+import org.gradle.api.tasks.InputFile;
+import org.gradle.api.tasks.TaskAction;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+
+public class PrintGlobalBuildInfoTask extends DefaultTask {
+    private final RegularFileProperty buildInfoFile;
+    private final RegularFileProperty compilerVersionFile;
+    private final RegularFileProperty runtimeVersionFile;
+    private final RegularFileProperty fipsJvmFile;
+    private List<Runnable> globalInfoListeners = new ArrayList<>();
+
+    @Inject
+    public PrintGlobalBuildInfoTask(ObjectFactory objectFactory) {
+        this.buildInfoFile = objectFactory.fileProperty();
+        this.compilerVersionFile = objectFactory.fileProperty();
+        this.runtimeVersionFile = objectFactory.fileProperty();
+        this.fipsJvmFile = objectFactory.fileProperty();
+    }
+
+    @InputFile
+    public RegularFileProperty getBuildInfoFile() {
+        return buildInfoFile;
+    }
+
+    @InputFile
+    public RegularFileProperty getCompilerVersionFile() {
+        return compilerVersionFile;
+    }
+
+    @InputFile
+    public RegularFileProperty getRuntimeVersionFile() {
+        return runtimeVersionFile;
+    }
+
+    @InputFile
+    public RegularFileProperty getFipsJvmFile() {
+        return fipsJvmFile;
+    }
+
+    public void setGlobalInfoListeners(List<Runnable> globalInfoListeners) {
+        this.globalInfoListeners = globalInfoListeners;
+    }
+
+    @TaskAction
+    public void print() {
+        getLogger().quiet("=======================================");
+        getLogger().quiet("Elasticsearch Build Hamster says Hello!");
+        getLogger().quiet(getFileText(getBuildInfoFile()).asString());
+        getLogger().quiet("  Random Testing Seed  : " + getProject().property("testSeed"));
+        getLogger().quiet("=======================================");
+
+        setGlobalProperties();
+        globalInfoListeners.forEach(Runnable::run);
+
+        // Since all tasks depend on this task, and it always runs for every build, this makes sure that lifecycle tasks will still
+        // correctly report as UP-TO-DATE, since the convention is a lifecycle task (i.e. assemble, build, etc) will only be marked as
+        // UP-TO-DATE if all upstream tasks were also UP-TO-DATE.
+        setDidWork(false);
+    }
+
+    private TextResource getFileText(RegularFileProperty regularFileProperty) {
+        return getProject().getResources().getText().fromFile(regularFileProperty.getAsFile().get());
+    }
+
+    private void setGlobalProperties() {
+        getProject().getRootProject().allprojects(p -> {
+            ExtraPropertiesExtension ext = p.getExtensions().getByType(ExtraPropertiesExtension.class);
+            ext.set("compilerJavaVersion", JavaVersion.valueOf(getFileText(getCompilerVersionFile()).asString()));
+            ext.set("runtimeJavaVersion", JavaVersion.valueOf(getFileText(getRuntimeVersionFile()).asString()));
+            ext.set("inFipsJvm", Boolean.valueOf(getFileText(getFipsJvmFile()).asString()));
+        });
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java
index e73a9d1e585e3..7ddec2b887ec6 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -26,6 +26,7 @@
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.artifacts.Dependency;
 import org.gradle.api.file.FileTree;
+import org.gradle.api.provider.Property;
 import org.gradle.api.specs.Spec;
 import org.gradle.api.tasks.CacheableTask;
 import org.gradle.api.tasks.Classpath;
@@ -79,17 +80,13 @@ public class ThirdPartyAuditTask extends DefaultTask {
 
     private String javaHome;
 
-    private JavaVersion targetCompatibility;
+    private final Property<JavaVersion> targetCompatibility = getProject().getObjects().property(JavaVersion.class);
 
     @Input
-    public JavaVersion getTargetCompatibility() {
+    public Property<JavaVersion> getTargetCompatibility() {
         return targetCompatibility;
     }
 
-    public void setTargetCompatibility(JavaVersion targetCompatibility) {
-        this.targetCompatibility = targetCompatibility;
-    }
-
     @InputFiles
     @PathSensitive(PathSensitivity.NAME_ONLY)
     public Configuration getForbiddenAPIsConfiguration() {
@@ -287,7 +284,7 @@ private void extractJars(Set<File> jars) {
             // other version specific implementation of said classes.
             IntStream.rangeClosed(
                 Integer.parseInt(JavaVersion.VERSION_1_9.getMajorVersion()),
-                Integer.parseInt(targetCompatibility.getMajorVersion())
+                Integer.parseInt(targetCompatibility.get().getMajorVersion())
             ).forEach(majorVersion -> getProject().copy(spec -> {
                 spec.from(getProject().zipTree(jar));
                 spec.into(jarExpandDir);
diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties
new file mode 100644
index 0000000000000..7428707877242
--- /dev/null
+++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.global-build-info.properties
@@ -0,0 +1 @@
+implementation-class=org.elasticsearch.gradle.info.GlobalBuildInfoPlugin
\ No newline at end of file
diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle
index 61e3546ed8919..48bc899cd29b4 100644
--- a/distribution/tools/plugin-cli/build.gradle
+++ b/distribution/tools/plugin-cli/build.gradle
@@ -40,8 +40,8 @@ test {
   systemProperty 'tests.security.manager', 'false'
 }
 
-if (project.inFipsJvm) {
+thirdPartyAudit.onlyIf {
   // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
   // rather than provide a long list of exclusions, disable the check on FIPS.
-  thirdPartyAudit.enabled = false
-}
+  project.inFipsJvm == false
+}
\ No newline at end of file
diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle
index 23de6a7f93b4f..d64e0aff7749d 100644
--- a/modules/transport-netty4/build.gradle
+++ b/modules/transport-netty4/build.gradle
@@ -172,10 +172,12 @@ thirdPartyAudit {
   )
 }
 
-if (project.inFipsJvm == false) {
-  // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in
-  // a FIPS JVM with BouncyCastleFIPS Provider
-  thirdPartyAudit.ignoreMissingClasses (
-    'org.bouncycastle.asn1.x500.X500Name'
-  )
+rootProject.globalInfo.ready {
+  if (project.inFipsJvm == false) {
+    // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in
+    // a FIPS JVM with BouncyCastleFIPS Provider
+    thirdPartyAudit.ignoreMissingClasses(
+      'org.bouncycastle.asn1.x500.X500Name'
+    )
+  }
 }
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle
index cbe417708d778..835147c255c8b 100644
--- a/plugins/ingest-attachment/build.gradle
+++ b/plugins/ingest-attachment/build.gradle
@@ -84,8 +84,8 @@ thirdPartyAudit{
   ignoreMissingClasses()
 }
 
-if (project.inFipsJvm) {
-  // FIPS JVM includes manny classes from bouncycastle which count as jar hell for the third party audit,
-  // rather than provide a long list of exclusions, disable the check on FIPS.
-  thirdPartyAudit.enabled = false
-}
+thirdPartyAudit.onlyIf {
+  // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
+  // rather than provide a long list of exclusions, disable the check on FIPS.
+  project.inFipsJvm == false
+}
\ No newline at end of file
diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle
index 9f93d18a0e15e..7800ff6951a89 100644
--- a/plugins/transport-nio/build.gradle
+++ b/plugins/transport-nio/build.gradle
@@ -149,10 +149,12 @@ thirdPartyAudit {
     'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator'
   )
 }
-if (project.inFipsJvm == false) {
-  // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in
-  // a FIPS JVM with BouncyCastleFIPS Provider
-  thirdPartyAudit.ignoreMissingClasses (
-    'org.bouncycastle.asn1.x500.X500Name'
-  )
+rootProject.globalInfo.ready {
+  if (project.inFipsJvm == false) {
+    // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in
+    // a FIPS JVM with BouncyCastleFIPS Provider
+    thirdPartyAudit.ignoreMissingClasses(
+      'org.bouncycastle.asn1.x500.X500Name'
+    )
+  }
 }
diff --git a/server/build.gradle b/server/build.gradle
index ce5e4cc807fdf..391fdf46469f0 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -56,8 +56,10 @@ if (!isEclipse && !isIdea) {
 }
 
 forbiddenApisJava12 {
-  if (project.runtimeJavaVersion < JavaVersion.VERSION_12) {
-    targetCompatibility = JavaVersion.VERSION_12.getMajorVersion()
+  doFirst {
+    if (project.runtimeJavaVersion < JavaVersion.VERSION_12) {
+      targetCompatibility = JavaVersion.VERSION_12.getMajorVersion()
+    }
   }
 }
 
diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle
index 8501de714fae6..cace98d97b015 100644
--- a/x-pack/plugin/ccr/qa/restart/build.gradle
+++ b/x-pack/plugin/ccr/qa/restart/build.gradle
@@ -41,7 +41,7 @@ followClusterTestRunner {
 task followClusterRestartTest(type: RestIntegTestTask) {}
 
 followClusterRestartTestCluster {
-  dependsOn followClusterTestRunner
+  dependsOn followClusterTestRunner, 'followClusterTestCluster#stop'
  numNodes = 1
   clusterName = 'follow-cluster'
   dataDir = { nodeNumber -> followClusterTest.nodes[0].dataDir }
diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle
index 29f278b95defa..5a95594b292ed 100644
--- a/x-pack/plugin/security/cli/build.gradle
+++ b/x-pack/plugin/security/cli/build.gradle
@@ -22,16 +22,18 @@ dependencyLicenses {
   mapping from: /bc.*/, to: 'bouncycastle'
 }
 
-if (project.inFipsJvm) {
-  test.enabled = false
-  testingConventions.enabled = false
-  // Forbiden APIs non-portable checks fail because bouncy castle classes being used from the FIPS JDK since those are
-  // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS.
-  tasks.withType(CheckForbiddenApis) {
-    bundledSignatures -= "jdk-non-portable"
-  }
-  // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
-  // rather than provide a long list of exclusions, disable the check on FIPS.
-  thirdPartyAudit.enabled = false
+rootProject.globalInfo.ready {
+  if (project.inFipsJvm) {
+    test.enabled = false
+    testingConventions.enabled = false
+    // Forbidden APIs non-portable checks fail because Bouncy Castle classes are used from the FIPS JDK, since those are
+    // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS.
+    tasks.withType(CheckForbiddenApis) {
+      bundledSignatures -= "jdk-non-portable"
+    }
+    // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
+    // rather than provide a long list of exclusions, disable the check on FIPS.
+    thirdPartyAudit.enabled = false
-}
+  }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle
index de4e173463612..19459bade97a8 100644
--- a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle
+++ b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle
@@ -207,18 +207,16 @@ integTestCluster {
     return tmpFile.exists()
   }
 }
-Closure notRunningFips = {
-  Boolean.parseBoolean(BuildPlugin.runJavaAsScript(project, project.runtimeJavaHome,
-    'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));')) == false
-}
 
 // Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail.
 // TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores.
 // https://github.com/elastic/elasticsearch/issues/32306
-tasks.matching({ it.name == "integTestCluster#init" }).all { onlyIf notRunningFips }
-tasks.matching({ it.name == "integTestCluster#start" }).all { onlyIf notRunningFips }
-tasks.matching({ it.name == "integTestCluster#wait" }).all { onlyIf notRunningFips }
-tasks.matching({ it.name == "integTestRunner" }).all { onlyIf notRunningFips }
+tasks.matching { it.name in ["integTestCluster#init", "integTestCluster#start", "integTestCluster#wait", "integTestRunner"] }.all {
+  onlyIf {
+    project.inFipsJvm == false
+  }
+}
+
 /** A lazy evaluator to find the san to use for certificate generation.
*/ class SanEvaluator { diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 7f0e14d2a53bf..70767faf33499 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -116,13 +116,15 @@ for (Version version : bwcVersions.indexCompatible) { setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + rootProject.globalInfo.ready { + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } } setting 'xpack.license.self_generated.type', 'trial' dependsOn copyTestNodeKeyMaterial @@ -160,13 +162,15 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' setting 'xpack.security.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + rootProject.globalInfo.ready { + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } } setting 'xpack.license.self_generated.type', 'trial' dependsOn copyTestNodeKeyMaterial diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 5a201832e7c39..7f878e6356b73 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -41,8 +41,10 @@ integTestCluster { setting 'reindex.ssl.truststore.password', 'password' // Workaround for JDK-8212885 - if (project.ext.runtimeJavaVersion.isJava12Compatible() == false) { - setting 'reindex.ssl.supported_protocols', 'TLSv1.2' + rootProject.globalInfo.ready { + if (project.ext.runtimeJavaVersion.isJava12Compatible() == false) { + setting 'reindex.ssl.supported_protocols', 'TLSv1.2' + } } extraConfigFile 'roles.yml', 'roles.yml' diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 8d5b133454a73..d75ecbd7a55ed 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -123,13 +123,15 @@ for (Version version : bwcVersions.wireCompatible) { setting 
'xpack.security.authc.token.timeout', '60m'
         setting 'logger.org.elasticsearch.xpack.security.authc.TokenService', 'trace'
         setting 'xpack.security.audit.enabled', 'true'
-        if (project.inFipsJvm) {
-          setting 'xpack.security.transport.ssl.key', 'testnode.pem'
-          setting 'xpack.security.transport.ssl.certificate', 'testnode.crt'
-          keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode'
-        } else {
-          setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks'
-          setting 'xpack.security.transport.ssl.keystore.password', 'testnode'
+        rootProject.globalInfo.ready {
+          if (project.inFipsJvm) {
+            setting 'xpack.security.transport.ssl.key', 'testnode.pem'
+            setting 'xpack.security.transport.ssl.certificate', 'testnode.crt'
+            keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode'
+          } else {
+            setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks'
+            setting 'xpack.security.transport.ssl.keystore.password', 'testnode'
+          }
         }
         dependsOn copyTestNodeKeyMaterial
         extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks')
@@ -188,13 +190,15 @@ for (Version version : bwcVersions.wireCompatible) {
       setting 'xpack.security.transport.ssl.enabled', 'true'
       setting 'xpack.security.authc.token.timeout', '60m'
       setting 'logger.org.elasticsearch.xpack.security.authc.TokenService', 'trace'
-      if (project.inFipsJvm) {
-        setting 'xpack.security.transport.ssl.key', 'testnode.pem'
-        setting 'xpack.security.transport.ssl.certificate', 'testnode.crt'
-        keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode'
-      } else {
-        setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks'
-        setting 'xpack.security.transport.ssl.keystore.password', 'testnode'
+      rootProject.globalInfo.ready {
+        if (project.inFipsJvm) {
+          setting 'xpack.security.transport.ssl.key', 'testnode.pem'
+          setting 'xpack.security.transport.ssl.certificate', 'testnode.crt'
+          keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode'
+        } else {
+          setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks'
+          setting 'xpack.security.transport.ssl.keystore.password', 'testnode'
+        }
       }
       setting 'node.attr.upgraded', 'true'
       setting 'xpack.security.authc.token.enabled', 'true'

From 5f3d0e4ab16c45221e406839d009f1f16bb7aab2 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 24 May 2019 15:42:59 -0400
Subject: [PATCH 266/321] Adjust load SplitIndexIT#testSplitIndexPrimaryTerm (#42477)

SplitIndexIT#testSplitIndexPrimaryTerm sometimes times out due to
relocating many shards. This change adjusts the load and increases the
timeout.
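For context on the shard-count randomization below: an index can only be split when the target shard count is an exact multiple of the source shard count, so the test now draws the source count from the divisors of a randomly chosen target count. A minimal plain-Java sketch of that selection idea (illustrative only; the retry loop stands in for the test framework's randomValueOtherThanMany helper, and the bounds mirror the new test code):

    import java.util.Random;

    class ShardCountPicker {
        public static void main(String[] args) {
            Random random = new Random();
            // target shard count in [2, 20], as in the adjusted test
            int numberOfTargetShards = 2 + random.nextInt(19);
            // re-draw the source count until it divides the target count evenly
            int numberOfShards;
            do {
                numberOfShards = 1 + random.nextInt(numberOfTargetShards - 1);
            } while (numberOfTargetShards % numberOfShards != 0);
            System.out.println(numberOfShards + " shard(s) -> " + numberOfTargetShards + " shards");
        }
    }

Capping the target at 20 shards keeps the number of shard relocations, and therefore the time spent waiting for a green cluster, bounded.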
--- .../admin/indices/create/SplitIndexIT.java | 32 ++++--------------- 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 05d1c5dcd803f..0fecff449f9b5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -45,8 +45,8 @@ import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -62,7 +62,6 @@ import java.io.UncheckedIOException; import java.util.Arrays; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -75,7 +74,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class SplitIndexIT extends ESIntegTestCase { @@ -184,9 +182,6 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha } } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); ensureYellow(); client().admin().indices().prepareUpdateSettings("source") .setSettings(Settings.builder() @@ -287,19 +282,13 @@ public void assertAllUniqueDocs(SearchResponse response, int numDocs) { } public void testSplitIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(1, 2, 4, 8); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size()), factors); - final int numberOfShards = randomSubsetOf(numberOfShardsFactors).stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = numberOfShardsFactors.stream().reduce(2, (x, y) -> x * y); + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards)).get(); - - final ImmutableOpenMap dataNodes = - client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); - assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); - ensureYellow(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards // fail random primary shards to force primary terms to increase final Index source = resolveIndex("source"); @@ -352,7 +341,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .setResizeType(ResizeType.SPLIT) .setSettings(splitSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // 
needs more than the default to relocate many shards final IndexMetaData aftersplitIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { @@ -365,9 +354,7 @@ private static IndexMetaData indexMetaData(final Client client, final String ind return clusterStateResponse.getState().metaData().index(index); } - public void testCreateSplitIndex() { - internalCluster().ensureAtLeastNumDataNodes(2); - + public void testCreateSplitIndex() throws Exception { Version version = VersionUtils.randomIndexCompatibleVersion(random()); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) .put("number_of_shards", 1) @@ -378,9 +365,7 @@ public void testCreateSplitIndex() { client().prepareIndex("source", "type") .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap<String, DiscoveryNode> dataNodes = - client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + internalCluster().ensureAtLeastNumDataNodes(2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. @@ -486,9 +471,6 @@ public void testCreateSplitWithIndexSort() throws Exception { client().prepareIndex("source", "type", Integer.toString(i)) .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes() .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. From 8bbd3e3096295ce20d79b210dcd04a851b0f2736 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Fri, 24 May 2019 15:01:26 -0500 Subject: [PATCH 267/321] Address test failures for SmokeTestWatcherWithSecurityIT (#42092) * Address test failures for SmokeTestWatcherWithSecurityIT There are likely multiple root causes to the seemingly random failures generated by SmokeTestWatcherWithSecurityIT. This commit un-mutes this test, addresses one known cause, and adds debug logging for this test. The known root cause for one failure is that we can have a Watch running that is reading data from an index. Before we stop Watcher, we delete that index. If Watcher happens to execute after the deletion of the index but before the stop of Watcher, the test can fail. The fix here is to simply move the index deletion to after the stop of Watcher.
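To make the new ordering concrete, here is a minimal sketch of the teardown shape this change produces (the class name is hypothetical, and the REST paths and assertions are modeled on the calls used elsewhere in this test rather than copied verbatim):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.test.rest.ESRestTestCase;
    import org.elasticsearch.test.rest.yaml.ObjectPath;
    import org.junit.After;

    import static org.hamcrest.Matchers.is;

    public class WatcherTeardownSketch extends ESRestTestCase { // hypothetical test class
        @After
        public void stopWatcher() throws Exception {
            // Wait until Watcher reports itself stopped, so no watch execution
            // can still be reading from the test index.
            assertBusy(() -> {
                Response response = adminClient().performRequest(new Request("GET", "_watcher/stats"));
                String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state");
                assertThat(state, is("stopped"));
            });
            // Only after the stop is confirmed is it safe to delete the index.
            adminClient().performRequest(new Request("DELETE", "/my_test_index"));
        }
    }
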
Related #35361 Related #30777 Related #33291 Related #29893 --- x-pack/qa/smoke-test-watcher-with-security/build.gradle | 2 ++ .../smoketest/SmokeTestWatcherWithSecurityIT.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index 0b622fc446b38..fc66785d47957 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -24,6 +24,8 @@ integTestCluster { setting 'xpack.notification.email.account._email.smtp.user', '_user' keystoreSetting 'xpack.notification.email.account._email.smtp.secure_password', '_passwd' setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'debug' + setting 'logger.org.elasticsearch.xpack.core.watcher', 'debug' extraConfigFile 'roles.yml', 'roles.yml' setupCommand 'setupTestAdminUser', 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index bf53dfa83103e..e184ef19596af 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.smoketest; import org.apache.http.util.EntityUtils; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -32,7 +31,6 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35361") public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { private static final String TEST_ADMIN_USERNAME = "test_admin"; @@ -91,7 +89,6 @@ public void startWatcher() throws Exception { @After public void stopWatcher() throws Exception { - adminClient().performRequest(new Request("DELETE", "/my_test_index")); assertBusy(() -> { try { @@ -119,6 +116,8 @@ public void stopWatcher() throws Exception { throw new AssertionError(e); } }); + + adminClient().performRequest(new Request("DELETE", "/my_test_index")); } @Override From a7cf2994cec34f5131f8004da4e5123d2ae98aeb Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 13:19:56 -0700 Subject: [PATCH 268/321] Fix issue with using runtime JDK of Java12 --- .../gradle/precommit/PrecommitTasks.groovy | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index e5e4f021507f9..7e8fbd0658698 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -136,12 +136,10 @@ class PrecommitTasks { // we need to defer this configuration since we don't know the runtime java version until execution time targetCompatibility = project.runtimeJavaVersion.getMajorVersion() if 
(project.runtimeJavaVersion > JavaVersion.VERSION_11) { - doLast { - project.logger.info( - "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", - project.runtimeJavaVersion - ) - } + project.logger.info( + "Forbidden APIs does not support java version past 11. Will use the signatures from 11 for ", + project.runtimeJavaVersion + ) targetCompatibility = JavaVersion.VERSION_11.getMajorVersion() } } From 9772574f9d0b942a1ee8dba5ff503b4cd286e36c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 13:46:59 -0700 Subject: [PATCH 269/321] Use reproducible method of generating properties file for better caching (#42539) --- buildSrc/build.gradle | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index f239427330c58..d3a16f55277d7 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -45,21 +45,17 @@ if (project == rootProject) { // we update the version property to reflect if we are building a snapshot or a release build // we write this back out below to load it in the Build.java which will be shown in rest main action // to indicate this being a snapshot build or a release build. -File propsFile = project.file('version.properties') -Properties props = VersionPropertiesLoader.loadBuildSrcVersion(propsFile) +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('version.properties')) version = props.getProperty("elasticsearch") + +task generateVersionProperties(type: WriteProperties) { + outputFile = "${buildDir}/version.properties" + comment = 'Generated version properties' + properties(props) +} + processResources { - inputs.file(propsFile) - // We need to be explicit with the version because we add snapshot and qualifier to it based on properties - inputs.property("dynamic_elasticsearch_version", props.getProperty("elasticsearch")) - doLast { - Writer writer = file("$destinationDir/version.properties").newWriter() - try { - props.store(writer, "Generated version properties") - } finally { - writer.close() - } - } + from(generateVersionProperties) } /***************************************************************************** From 5b0b98b7c79ef1de84d216ba57ceb852bfa86c08 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 21 May 2019 17:20:27 -0400 Subject: [PATCH 270/321] Recovery with syncId should verify seqno infos (#41265) This change verifies and aborts recovery if source and target have the same syncId but different sequenceId. This commit also adds an upgrade test to ensure that we always utilize syncId. 
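At its core, the new guard refuses the sync-id shortcut unless the sequence-number stats recorded in the two Lucene commits agree. A condensed, self-contained sketch of that logic follows (the real implementation is the canSkipPhase1 method in the diff below; the string keys here mirror the commit user-data constants Engine.SYNC_COMMIT_ID, SequenceNumbers.LOCAL_CHECKPOINT_KEY, and SequenceNumbers.MAX_SEQ_NO):

    import java.util.Map;
    import java.util.Objects;

    final class SyncIdRecoveryCheck {
        // Returns true only when both commits carry the same sync id AND the same
        // seq_no stats; a shared sync id with diverging stats indicates an
        // inconsistency, so recovery is aborted rather than silently skipped.
        static boolean canSkipPhase1(Map<String, String> source, Map<String, String> target) {
            String syncId = source.get("sync_id");
            if (syncId == null || syncId.equals(target.get("sync_id")) == false) {
                return false; // no shared sync id: fall back to file-based recovery
            }
            if (Objects.equals(source.get("local_checkpoint"), target.get("local_checkpoint")) == false
                    || Objects.equals(source.get("max_seq_no"), target.get("max_seq_no")) == false) {
                throw new IllegalStateException("sync id matches but seq_no stats are mismatched");
            }
            return true; // identical commits: skip copying segment files
        }
    }
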
--- .../upgrades/AbstractRollingTestCase.java | 1 + .../elasticsearch/upgrades/RecoveryIT.java | 127 +++++++++++++++--- .../recovery/RecoverySourceHandler.java | 46 ++++--- .../recovery/RecoverySourceHandlerTests.java | 39 ++++++ 4 files changed, 174 insertions(+), 39 deletions(-) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 1c57be7abbaa1..f6041bc2af754 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -42,6 +42,7 @@ public static ClusterType parse(String value) { } protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); + protected static final boolean firstMixedRound = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); @Override protected final boolean preserveIndicesUponCompletion() { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index bbc6d27472467..32ddc77113bc8 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.hamcrest.Matcher; import java.io.IOException; import java.util.ArrayList; @@ -40,6 +41,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -172,6 +174,25 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { } } + private void assertDocCountOnAllCopies(String index, int expectedCount) throws Exception { + assertBusy(() -> { + Map<String, Object> state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + String xpath = "routing_table.indices." + index + ".shards.0.node"; + @SuppressWarnings("unchecked") List<String> assignedNodes = (List<String>) XContentMapValues.extractValue(xpath, state); + assertNotNull(state.toString(), assignedNodes); + for (String assignedNode : assignedNodes) { + try { + assertCount(index, "_only_nodes:" + assignedNode, expectedCount); + } catch (ResponseException e) { + if (e.getMessage().contains("no data nodes with criteria [" + assignedNode + "found for shard: [" + index + "][0]")) { + throw new AssertionError(e); // shard is relocating - ask assert busy to retry + } + throw e; + } + } + }); + } + private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { final int actualDocs; try { @@ -275,34 +296,52 @@ public void testRelocationWithConcurrentIndexing() throws Exception { } } + /** + * This test ensures that peer recovery won't get stuck in a situation where the recovery target and recovery source + * have an identical sync id but a different local checkpoint in the commit; in particular, the target does not have + * sequence numbers yet. This is possible if the primary is on 6.x while the replica was on 5.x and some write + * operations with sequence numbers have taken place.
If this is not the case, then peer recovery should utilize + * syncId and skip copying files. + */ public void testRecoverSyncedFlushIndex() throws Exception { final String index = "recover_synced_flush_index"; if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - // if the node with the replica is the first to be restarted, while a replica is still recovering - // then delayed allocation will kick in. When the node comes back, the master will search for a copy - // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN - // before timing out - .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") - .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), "256b"); + } createIndex(index, settings.build()); - indexDocs(index, 0, randomInt(5)); - // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. - // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit. - assertBusy(() -> { - try { - Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); - Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards"); - assertThat(result.get("successful"), equalTo(result.get("total"))); - assertThat(result.get("failed"), equalTo(0)); - } catch (ResponseException ex) { - throw new AssertionError(ex); // cause assert busy to retry + ensureGreen(index); + indexDocs(index, 0, 40); + syncedFlush(index); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + ensureGreen(index); + if (firstMixedRound) { + assertPeerRecoveredFiles("peer recovery with syncId should not copy files", index, "upgraded-node-0", equalTo(0)); + assertDocCountOnAllCopies(index, 40); + indexDocs(index, 40, 50); + syncedFlush(index); + } else { + assertPeerRecoveredFiles("peer recovery with syncId should not copy files", index, "upgraded-node-1", equalTo(0)); + assertDocCountOnAllCopies(index, 90); + indexDocs(index, 90, 60); + syncedFlush(index); + // exclude node-2 from allocation-filter so we can trim translog on the primary before node-2 starts recovering + if (randomBoolean()) { + updateIndexSettings(index, Settings.builder().put("index.routing.allocation.include._name", "upgraded-*")); } + } + } else { + final int docsAfterUpgraded = randomIntBetween(0, 100); + indexDocs(index, 150, docsAfterUpgraded); + ensureGreen(index); + assertPeerRecoveredFiles("peer recovery with syncId should not copy files", index, "upgraded-node-2", equalTo(0)); + assertDocCountOnAllCopies(index, 150 + docsAfterUpgraded); } - ensureGreen(index); } public void testRecoveryWithSoftDeletes() throws Exception { @@ -480,4 +519,52 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab assertThat(XContentMapValues.extractValue("index.verified_before_close", settings), nullValue()); } } + + private void syncedFlush(String index) throws Exception { + // We have to spin synced-flush requests here because we fire the global checkpoint
sync for the last write operation. + // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit. + assertBusy(() -> { + try { + Response resp = client().performRequest(new Request("POST", index + "/_flush/synced")); + Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards"); + assertThat(result.get("failed"), equalTo(0)); + } catch (ResponseException ex) { + throw new AssertionError(ex); // cause assert busy to retry + } + }); + // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId + ensureGlobalCheckpointSynced(index); + } + + @SuppressWarnings("unchecked") + private void assertPeerRecoveredFiles(String reason, String index, String targetNode, Matcher<Integer> sizeMatcher) throws IOException { + Map<String, Object> recoveryStats = entityAsMap(client().performRequest(new Request("GET", index + "/_recovery"))); + List<Map<String, Object>> shards = (List<Map<String, Object>>) XContentMapValues.extractValue(index + "." + "shards", recoveryStats); + for (Map<String, Object> shard : shards) { + if (Objects.equals(XContentMapValues.extractValue("type", shard), "PEER")) { + if (Objects.equals(XContentMapValues.extractValue("target.name", shard), targetNode)) { + Integer recoveredFileSize = (Integer) XContentMapValues.extractValue("index.files.recovered", shard); + assertThat(reason + " target node [" + targetNode + "] stats [" + recoveryStats + "]", recoveredFileSize, sizeMatcher); + } + } + } + } + + @SuppressWarnings("unchecked") + private void ensureGlobalCheckpointSynced(String index) throws Exception { + assertBusy(() -> { + Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + List<Map<String, Object>> shardStats = (List<Map<String, Object>>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats); + shardStats.stream() + .map(shard -> (Map<String, Object>) XContentMapValues.extractValue("seq_no", shard)) + .filter(Objects::nonNull) + .forEach(seqNoStat -> { + long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); + long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); + long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); + assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); + assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); + }); + }, 60, TimeUnit.SECONDS); + } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 4e82798e34128..4b89e75691a76 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -359,25 +359,10 @@ public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckp recoverySourceMetadata.asMap().size() + " files", name); } } - // Generate a "diff" of all the identical, different, and missing - // segment files on the target node, using the existing files on - // the source node - String recoverySourceSyncId = recoverySourceMetadata.getSyncId(); - String recoveryTargetSyncId = request.metadataSnapshot().getSyncId(); - final boolean recoverWithSyncId = recoverySourceSyncId != null && - recoverySourceSyncId.equals(recoveryTargetSyncId); - if (recoverWithSyncId) { - final long numDocsTarget = request.metadataSnapshot().getNumDocs(); - final long numDocsSource =
recoverySourceMetadata.getNumDocs(); - if (numDocsTarget != numDocsSource) { - throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + - "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget - + "(" + request.targetNode().getName() + ")"); - } - // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target. - // so we don't return here - logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceSyncId); - } else { + if (canSkipPhase1(recoverySourceMetadata, request.metadataSnapshot()) == false) { + // Generate a "diff" of all the identical, different, and missing + // segment files on the target node, using the existing files on + // the source node final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot()); for (StoreFileMetaData md : diff.identical) { phase1ExistingFileNames.add(md.name()); @@ -458,6 +443,9 @@ public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckp throw targetException; } } + } else { + logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", + recoverySourceMetadata.getSyncId()); } final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); @@ -470,6 +458,26 @@ public SendFileResult phase1(final IndexCommit snapshot, final long globalCheckp } } + boolean canSkipPhase1(Store.MetadataSnapshot source, Store.MetadataSnapshot target) { + if (source.getSyncId() == null || source.getSyncId().equals(target.getSyncId()) == false) { + return false; + } + if (source.getNumDocs() != target.getNumDocs()) { + throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + + "of docs differ: " + source.getNumDocs() + " (" + request.sourceNode().getName() + ", primary) vs " + target.getNumDocs() + + "(" + request.targetNode().getName() + ")"); + } + SequenceNumbers.CommitInfo sourceSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(source.getCommitUserData().entrySet()); + SequenceNumbers.CommitInfo targetSeqNos = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(target.getCommitUserData().entrySet()); + if (sourceSeqNos.localCheckpoint != targetSeqNos.localCheckpoint || targetSeqNos.maxSeqNo != sourceSeqNos.maxSeqNo) { + final String message = "try to recover " + request.shardId() + " with sync id but " + + "seq_no stats are mismatched: [" + source.getCommitUserData() + "] vs [" + target.getCommitUserData() + "]"; + assert false : message; + throw new IllegalStateException(message); + } + return true; + } + void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener listener) { StopWatch stopWatch = new StopWatch().start(); final ActionListener wrappedListener = ActionListener.wrap( diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b49bef57aceb1..b00e89575ccd5 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -89,7 +89,9 @@ import java.util.Arrays; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import 
java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -649,6 +651,43 @@ public void writeFileChunk(StoreFileMetaData md, long position, BytesReference c store.close(); } + public void testVerifySeqNoStatsWhenRecoverWithSyncId() throws Exception { + IndexShard shard = mock(IndexShard.class); + when(shard.state()).thenReturn(IndexShardState.STARTED); + RecoverySourceHandler handler = new RecoverySourceHandler( + shard, new TestRecoveryTargetHandler(), getStartRecoveryRequest(), between(1, 16), between(1, 4)); + + String syncId = UUIDs.randomBase64UUID(); + int numDocs = between(0, 1000); + long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + long maxSeqNo = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + assertTrue(handler.canSkipPhase1( + newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs), + newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs))); + + AssertionError error = expectThrows(AssertionError.class, () -> { + long localCheckpointOnTarget = randomValueOtherThan(localCheckpoint, + () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE)); + long maxSeqNoOnTarget = randomValueOtherThan(maxSeqNo, + () -> randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE)); + handler.canSkipPhase1(newMetadataSnapshot(syncId, Long.toString(localCheckpoint), Long.toString(maxSeqNo), numDocs), + newMetadataSnapshot(syncId, Long.toString(localCheckpointOnTarget), Long.toString(maxSeqNoOnTarget), numDocs)); + }); + assertThat(error.getMessage(), containsString("try to recover [index][1] with sync id but seq_no stats are mismatched:")); + } + + private Store.MetadataSnapshot newMetadataSnapshot(String syncId, String localCheckpoint, String maxSeqNo, int numDocs) { + Map<String, String> userData = new HashMap<>(); + userData.put(Engine.SYNC_COMMIT_ID, syncId); + if (localCheckpoint != null) { + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpoint); + } + if (maxSeqNo != null) { + userData.put(SequenceNumbers.MAX_SEQ_NO, maxSeqNo); + } + return new Store.MetadataSnapshot(Collections.emptyMap(), userData, numDocs); + } + private Store newStore(Path path) throws IOException { return newStore(path, true); } From 6362328b93d0bb972d5c9ffd1fcceee9a3a47404 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 24 May 2019 15:39:27 -0700 Subject: [PATCH 271/321] Remove renewal in retention lease recovery test (#42536) This commit removes the act of renewing some retention leases during a retention lease recovery test. Having renewal does not add anything extra to this test, but does allow for some situations where the test can fail spuriously (i.e., in a way that does not indicate that production code is broken).
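The racy pattern being removed is easiest to see in the test itself (a paraphrased fragment of the hunk below, not runnable in isolation; primary, id, retainingSequenceNumber, source, listener, and currentRetentionLeases are the test's local state):

    // The add is acknowledged through a latch-backed listener, so the test knows
    // exactly which lease it recorded.
    currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener));
    latch.await();
    // The follow-up renewal then replaces that lease with one carrying a fresh
    // timestamp; a replica whose recovery copied the pre-renewal lease can fail
    // equality assertions even though production code behaved correctly.
    currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source));
    // Dropping the renewal leaves one stable lease per id to assert against.
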
--- .../java/org/elasticsearch/index/seqno/RetentionLeaseIT.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index bbe05accb2813..debb6d219a5f1 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -36,7 +36,6 @@ import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -356,7 +355,6 @@ public void testRetentionLeasesBackgroundSyncWithSoftDeletesDisabled() throws Ex assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get()); } - @TestLogging(value = "org.elasticsearch.index:debug,org.elasticsearch.indices.recovery:trace") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); @@ -393,7 +391,6 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { final ActionListener listener = countDownLatchListener(latch); currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); - currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); } logger.info("finished adding [{}] retention leases", length); From d5281fc96f6fb2f022c87699bdad64d88614e04c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 18:08:51 -0700 Subject: [PATCH 272/321] Ignore JAR manifests when snapshotting runtime classpaths (#42548) --- .../elasticsearch/gradle/BuildPlugin.groovy | 8 +++++ .../test/StandaloneRestTestPlugin.groovy | 1 + .../org/elasticsearch/gradle/LoggedExec.java | 36 +++++++++++-------- plugins/discovery-azure-classic/build.gradle | 14 ++++++-- 4 files changed, 41 insertions(+), 18 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 3a058ca9310df..b5c69a418cceb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -129,6 +129,7 @@ class BuildPlugin implements Plugin { setupSeed(project) configureRepositories(project) project.extensions.getByType(ExtraPropertiesExtension).set('versions', VersionProperties.versions) + configureInputNormalization(project) configureSourceSets(project) configureCompile(project) configureJavadoc(project) @@ -580,6 +581,13 @@ class BuildPlugin implements Plugin { } } + /** + * Apply runtime classpath input normalization so that changes in JAR manifests don't break build cacheability + */ + static void configureInputNormalization(Project project) { + project.normalization.runtimeClasspath.ignore('META-INF/MANIFEST.MF') + } + /** Adds compiler settings to the project */ static void configureCompile(Project project) { ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) diff --git 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 6d895abaa97c7..c9a26eb74b54d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -62,6 +62,7 @@ class StandaloneRestTestPlugin implements Plugin { project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) BuildPlugin.configureRepositories(project) BuildPlugin.configureTestTasks(project) + BuildPlugin.configureInputNormalization(project) ExtraPropertiesExtension ext = project.extensions.getByType(ExtraPropertiesExtension) project.extensions.getByType(JavaPluginExtension).sourceCompatibility = ext.get('minimumRuntimeVersion') as JavaVersion diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java index c71b7ba183562..0921c611895fa 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java @@ -3,6 +3,7 @@ import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.logging.Logger; import org.gradle.api.tasks.Exec; import org.gradle.api.tasks.Internal; @@ -34,22 +35,27 @@ public LoggedExec() { if (getLogger().isInfoEnabled() == false) { setIgnoreExitValue(true); setSpoolOutput(false); - doLast(task -> { - if (getExecResult().getExitValue() != 0) { - try { - getLogger().error("Output for " + getExecutable() + ":"); - outputLogger.accept(getLogger()); - } catch (Exception e) { - throw new GradleException("Failed to read exec output", e); + // We use an anonymous inner class here because Gradle cannot properly snapshot this input for the purposes of + // incremental build if we use a lambda. This ensures LoggedExec tasks that declare output can be UP-TO-DATE. 
+ doLast(new Action() { + @Override + public void execute(Task task) { + if (LoggedExec.this.getExecResult().getExitValue() != 0) { + try { + LoggedExec.this.getLogger().error("Output for " + LoggedExec.this.getExecutable() + ":"); + outputLogger.accept(LoggedExec.this.getLogger()); + } catch (Exception e) { + throw new GradleException("Failed to read exec output", e); + } + throw new GradleException( + String.format( + "Process '%s %s' finished with non-zero exit value %d", + LoggedExec.this.getExecutable(), + LoggedExec.this.getArgs(), + LoggedExec.this.getExecResult().getExitValue() + ) + ); } - throw new GradleException( - String.format( - "Process '%s %s' finished with non-zero exit value %d", - getExecutable(), - getArgs(), - getExecResult().getExitValue() - ) - ); } }); } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index d7847259defaf..cb024d8ac00d7 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -56,7 +56,7 @@ dependencies { } // needed to be consistent with ssl host checking -String host = InetAddress.getLoopbackAddress().getHostAddress(); +String host = InetAddress.getLoopbackAddress().getHostAddress() // location of keystore and files to generate it File keystore = new File(project.buildDir, 'keystore/test-node.jks') @@ -67,6 +67,7 @@ task createKey(type: LoggedExec) { project.delete(keystore.parentFile) keystore.parentFile.mkdirs() } + outputs.file(keystore).withPropertyName('keystoreFile') executable = new File(project.runtimeJavaHome, 'bin/keytool') standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) args '-genkey', @@ -81,8 +82,15 @@ task createKey(type: LoggedExec) { } // add keystore to test classpath: it expects it there -sourceSets.test.resources.srcDir(keystore.parentFile) -processTestResources.dependsOn(createKey) +processTestResources { + from createKey +} + +normalization { + runtimeClasspath { + ignore 'test-node.jks' + } +} dependencyLicenses { mapping from: /azure-.*/, to: 'azure' From 0bb46d73cb89016ab9d96e76693bb0d7cee267a1 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 18:29:40 -0700 Subject: [PATCH 273/321] Make LoggerUsageTask cacheable (#42550) --- .../gradle/precommit/PrecommitTasks.groovy | 1 - .../gradle/precommit/LoggerUsageTask.java | 27 +++++++------------ .../gradle/precommit/PrecommitTask.java | 1 - 3 files changed, 10 insertions(+), 19 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 7e8fbd0658698..3693609b4de70 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -236,7 +236,6 @@ class PrecommitTasks { "org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}") return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) { classpath = project.configurations.loggerUsagePlugin - javaHome = project.runtimeJavaHome } } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java index fb1831bda4dcd..a730e069d5a7f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java +++ 
b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java @@ -22,10 +22,13 @@ import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Classpath; -import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskAction; import java.io.File; @@ -33,13 +36,13 @@ /** * Runs LoggerUsageCheck on a set of directories. */ +@CacheableTask public class LoggerUsageTask extends PrecommitTask { + private FileCollection classpath; + public LoggerUsageTask() { setDescription("Runs LoggerUsageCheck on output directories of all source sets"); - getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().all(sourceSet -> { - dependsOn(sourceSet.getClassesTaskName()); - }); } @TaskAction @@ -47,7 +50,6 @@ public void runLoggerUsageTask() { LoggedExec.javaexec(getProject(), spec -> { spec.setMain("org.elasticsearch.test.loggerusage.ESLoggerUsageChecker"); spec.classpath(getClasspath()); - spec.executable(getJavaHome() + "/bin/java"); getClassDirectories().forEach(spec::args); }); } @@ -62,26 +64,17 @@ public void setClasspath(FileCollection classpath) { } @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) @SkipWhenEmpty public FileCollection getClassDirectories() { return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().stream() // Don't pick up all source sets like the java9 ones as logger-check doesn't support the class format - .filter(sourceSet -> sourceSet.getName().equals("main") || sourceSet.getName().equals("test")) + .filter(sourceSet -> sourceSet.getName().equals(SourceSet.MAIN_SOURCE_SET_NAME) + || sourceSet.getName().equals(SourceSet.TEST_SOURCE_SET_NAME)) .map(sourceSet -> sourceSet.getOutput().getClassesDirs()) .reduce(FileCollection::plus) .orElse(getProject().files()) .filter(File::exists); } - @Input - public Object getJavaHome() { - return javaHome; - } - - public void setJavaHome(Object javaHome) { - this.javaHome = javaHome; - } - - private FileCollection classpath; - private Object javaHome; } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java index 6f99e901ec47a..5a29c4a4a3570 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java @@ -36,7 +36,6 @@ public File getSuccessMarker() { @TaskAction public void writeMarker() throws IOException { - getSuccessMarker().getParentFile().mkdirs(); Files.write(getSuccessMarker().toPath(), new byte[]{}, StandardOpenOption.CREATE); } From 1ed7b616afa99582d88dd82389ce510e27971119 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 21:14:12 -0400 Subject: [PATCH 274/321] Adjust bwc version Relates #39687 --- .../action/admin/indices/close/CloseIndexResponse.java | 4 ++-- .../action/admin/indices/close/CloseIndexResponseTests.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index ea7d14655c594..c653c264e95db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -60,7 +60,7 @@ public void readFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_2_0)) { readShardsAcknowledged(in); } - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getVersion().onOrAfter(Version.V_7_3_0)) { indices = unmodifiableList(in.readList(IndexResult::new)); } else { indices = unmodifiableList(emptyList()); @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_2_0)) { writeShardsAcknowledged(out); } - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_3_0)) { out.writeList(indices); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java index 40c34af51598d..aa1c0b3bbb445 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -100,7 +100,7 @@ public void testBwcSerialization() throws Exception { } assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged())); assertThat(deserializedResponse.isShardsAcknowledged(), equalTo(response.isShardsAcknowledged())); - if (version.onOrAfter(Version.V_8_0_0)) { + if (version.onOrAfter(Version.V_7_3_0)) { assertThat(deserializedResponse.getIndices(), hasSize(response.getIndices().size())); } else { assertThat(deserializedResponse.getIndices(), empty()); From 5ca06a5cef5950ee3839321320d098f420293042 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 24 May 2019 19:28:22 -0700 Subject: [PATCH 275/321] Make JarHell task cacheable (#42551) --- .../gradle/precommit/PrecommitTasks.groovy | 12 ++++----- .../gradle/precommit/JarHellTask.java | 25 +++++++------------ 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 3693609b4de70..f656f177ce67b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -107,14 +107,12 @@ class PrecommitTasks { } private static Task configureJarHell(Project project) { - Task task = project.tasks.create('jarHell', JarHellTask.class) - task.classpath = project.sourceSets.test.runtimeClasspath - if (project.plugins.hasPlugin(ShadowPlugin)) { - task.classpath += project.configurations.bundle + return project.tasks.create('jarHell', JarHellTask) { task -> + task.classpath = project.sourceSets.test.runtimeClasspath + if (project.plugins.hasPlugin(ShadowPlugin)) { + task.classpath += project.configurations.bundle + } } - task.dependsOn(project.sourceSets.test.classesTaskName) - task.javaHome = project.runtimeJavaHome - return task } private static Task configureThirdPartyAudit(Project project) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java 
b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java index fd5b0c5790773..c9152486a1c51 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java @@ -21,19 +21,20 @@ import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.file.FileCollection; -import org.gradle.api.tasks.Classpath; -import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.CompileClasspath; import org.gradle.api.tasks.TaskAction; +import java.io.File; + /** * Runs CheckJarHell on a classpath. */ +@CacheableTask public class JarHellTask extends PrecommitTask { private FileCollection classpath; - private Object javaHome; - public JarHellTask() { setDescription("Runs CheckJarHell on the configured classpath"); } @@ -42,23 +43,15 @@ public JarHellTask() { public void runJarHellCheck() { LoggedExec.javaexec(getProject(), spec -> { spec.classpath(getClasspath()); - spec.executable(getJavaHome() + "/bin/java"); spec.setMain("org.elasticsearch.bootstrap.JarHell"); }); } - @Input - public Object getJavaHome() { - return javaHome; - } - - public void setJavaHome(Object javaHome) { - this.javaHome = javaHome; - } - - @Classpath + // We use compile classpath normalization here because class implementation changes are irrelevant for the purposes of jar hell. + // We only care about the runtime classpath ABI here. + @CompileClasspath public FileCollection getClasspath() { - return classpath.filter(file -> file.exists()); + return classpath.filter(File::exists); } public void setClasspath(FileCollection classpath) { From 13af50a3f4152995eaf4365cd1f969086eb76844 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 24 May 2019 22:13:48 -0400 Subject: [PATCH 276/321] Enable recoveries trace log in CcrRetentionLeaseIT Tracked #41679 --- .../xpack/ccr/CcrRetentionLeaseIT.java | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 2cf6e3bdaf332..9595485e2721c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -88,7 +88,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; -@TestLogging(value = "org.elasticsearch.xpack.ccr:trace") +@TestLogging(value = "org.elasticsearch.xpack.ccr:trace,org.elasticsearch.indices.recovery:trace") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -192,7 +192,7 @@ public void testRetentionLeaseIsTakenAtTheStartOfRecovery() throws Exception { final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); @@ -268,7 +268,6 @@ public void 
testRetentionLeaseIsRenewedDuringRecovery() throws Exception { } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39331") public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); @@ -316,7 +315,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -354,7 +353,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws continue; } final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -392,7 +391,7 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List shardsStats = getShardsStats(stats); for (final ShardStats shardStats : shardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); assertThat( shardStats.getRetentionLeaseStats().retentionLeases().leases().iterator().next().id(), equalTo(retentionLeaseId)); @@ -454,7 +453,7 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); for (final ShardStats shardStats : afterUnfollowShardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { @@ -605,7 +604,7 @@ public void testRetentionLeaseAdvancesWhileFollowing() throws Exception { final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = 
currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); @@ -668,7 +667,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -706,7 +705,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E continue; } final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); @@ -912,7 +911,7 @@ public void onResponseReceived( leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); for (final ShardStats shardStats : afterUnfollowShardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { @@ -962,7 +961,7 @@ public void testForgetFollower() throws Exception { leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); final List afterForgetFollowerShardsStats = getShardsStats(afterForgetFollowerStats); for (final ShardStats shardStats : afterForgetFollowerShardsStats) { - assertThat(shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } @@ -982,7 +981,7 @@ private void assertRetentionLeaseRenewal( final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); @@ -999,7 +998,7 @@ private void assertRetentionLeaseRenewal( final List shardsStats = getShardsStats(stats); for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { final RetentionLeases 
currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); - assertThat(currentRetentionLeases.leases(), hasSize(1)); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); final RetentionLease retentionLease = currentRetentionLeases.leases().iterator().next(); assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); From d6d032d0c708d3ef31fe3c8ff391af42aa4afeee Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 25 May 2019 05:51:39 +0200 Subject: [PATCH 277/321] Fix Test Failures from MockNioTransport Logger (#42545) * This call can fail when it tries to re-schedule the timeout check after the threadpool was shut down already failing tests with RejectedExecutionException --- .../java/org/elasticsearch/transport/nio/MockNioTransport.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 42dae39146605..9470b7548adfb 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -372,7 +372,7 @@ private void logLongRunningExecutions() { } } if (stopped == false) { - threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + threadPool.scheduleUnlessShuttingDown(CHECK_INTERVAL, ThreadPool.Names.GENERIC, this::logLongRunningExecutions); } } From f95071d0af299c3fda326cb1a3f7db18f4ddc3db Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 25 May 2019 08:53:50 +0200 Subject: [PATCH 278/321] Upgrade to Netty 4.1.36 (#42543) --- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.36.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.35.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.35.Final.jar.sha1 | 1 - 
.../licenses/netty-transport-4.1.36.Final.jar.sha1 | 1 + 29 files changed, 15 insertions(+), 15 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a3214c789a47d..cbcbf00fc6d01 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -20,7 +20,7 @@ slf4j = 1.6.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 4.5.1 -netty = 4.1.35.Final +netty = 4.1.36.Final joda = 2.10.2 # when updating this version, you need to ensure compatibility with: diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 deleted file mode 100644 index 6112faf2d0103..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a244722975cddaef5f9bbd45e7a44d0db5f058d8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 new file mode 
100644 index 0000000000000..90895a5e168c9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +7f2db0921dd57df4db076229830ab09bba713aeb \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 deleted file mode 100644 index 811797decc1e8..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b86f6b9eedbe38d6fa0bbbefa961d566e293e13e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..efd6e5a327745 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8462116d327bb3d1ec24258071f2e7345a73dbfc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 deleted file mode 100644 index 3b0f1f7daa3c5..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7a38b0a3ee2fff3d9dd2bb44f5e16140b70b354 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..4e86fef0e121a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +62b73d439dbddf3c0dde092b048580139695ab46 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 deleted file mode 100644 index 26576f8e9ccdd..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c776487b782046e1399b00cd40c63ef51d26e953 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d9d50d776e9ba --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +f6f38fde652a70ea579897edc80e52353e487ae6 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 deleted file mode 100644 index 0956313b2aa40..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b23efe31416942718ac46ad00bb3e91e4b3f6ab7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d943140f3634c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +1c38a5920a10c01b1cce4cdc964447ec76abf1b5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 deleted file mode 100644 index cdd335d059196..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d60c4f4e12f0703dff477c9bf595f37a41ecacbc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..1499233b60d33 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +e4d243fbf4e6837fa294f892bf97149e18129100 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 deleted file mode 100644 index 8f52a39c4f7a0..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -526b2646885c57adb54e2f89b2e2b80bebce3962 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..f36c1b17d74e0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8546e6be47be587acab86bbd106ca023678f07d9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 deleted file mode 100644 index 6112faf2d0103..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a244722975cddaef5f9bbd45e7a44d0db5f058d8 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..90895a5e168c9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +7f2db0921dd57df4db076229830ab09bba713aeb \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 deleted file mode 100644 index 811797decc1e8..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b86f6b9eedbe38d6fa0bbbefa961d566e293e13e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..efd6e5a327745 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8462116d327bb3d1ec24258071f2e7345a73dbfc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 deleted file mode 100644 index 3b0f1f7daa3c5..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7a38b0a3ee2fff3d9dd2bb44f5e16140b70b354 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..4e86fef0e121a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ 
+62b73d439dbddf3c0dde092b048580139695ab46 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 deleted file mode 100644 index 26576f8e9ccdd..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c776487b782046e1399b00cd40c63ef51d26e953 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d9d50d776e9ba --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +f6f38fde652a70ea579897edc80e52353e487ae6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 deleted file mode 100644 index 0956313b2aa40..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b23efe31416942718ac46ad00bb3e91e4b3f6ab7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..d943140f3634c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +1c38a5920a10c01b1cce4cdc964447ec76abf1b5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 deleted file mode 100644 index cdd335d059196..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d60c4f4e12f0703dff477c9bf595f37a41ecacbc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..1499233b60d33 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +e4d243fbf4e6837fa294f892bf97149e18129100 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 deleted file mode 100644 index 8f52a39c4f7a0..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -526b2646885c57adb54e2f89b2e2b80bebce3962 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 new file mode 100644 index 0000000000000..f36c1b17d74e0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 @@ -0,0 +1 @@ +8546e6be47be587acab86bbd106ca023678f07d9 \ No newline at end of file From 5d837fa312b0e41a77a65462667a2d92d1114567 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Sat, 25 May 2019 07:51:21 +0100 Subject: [PATCH 279/321] [ML Data Frame] Mute Data Frame tests Relates to https://github.com/elastic/elasticsearch/issues/42344 --- .../xpack/dataframe/integration/DataFrameTransformIT.java | 1 + .../xpack/dataframe/integration/DataFrameAuditorIT.java | 2 ++ .../dataframe/integration/DataFrameConfigurationIndexIT.java | 
2 ++ .../xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameMetaDataIT.java | 2 ++ .../xpack/dataframe/integration/DataFramePivotRestIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameTaskFailedStateIT.java | 2 ++ .../xpack/dataframe/integration/DataFrameUsageIT.java | 2 ++ 8 files changed, 15 insertions(+) diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index b98367979bff9..486ea5e5d7403 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -30,6 +30,7 @@ public void cleanTransforms() throws IOException { cleanUp(); } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 9884c9bb6793b..7dc79c1ae8fbe 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -22,6 +23,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index 681599331c8af..d7e12cf2bee4d 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,6 +8,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -22,6 +23,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends 
DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index d9927cd09ed8f..9bac6ca0b4049 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 26a957ea055c2..5b95d1daead53 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -15,6 +16,7 @@ import java.io.IOException; import java.util.Map; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 22586a7b37d27..a0bec6ec13c34 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -21,6 +22,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; 
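The remaining suites below are muted the same way. As a hedged illustration, the sketch that follows condenses the two AwaitsFix variants this commit uses; the class and test names are stand-ins, and only the annotation forms and the bug URL come from the diffs:

import org.apache.lucene.util.LuceneTestCase;

// Class-level mute: the qualified form is required because the inherited nested
// annotation type is not in simple-name scope on the class declaration itself.
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
public class ExampleDataFrameIT extends DataFrameRestTestCase {

    // Method-level mute (the idiom used in DataFrameTransformIT): only this test is
    // skipped, and the simple name resolves through the LuceneTestCase superclass.
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42344")
    public void testExample() throws Exception {
        // test body elided
    }
}
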
diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 96aeeda8755f4..7b63644dd34ad 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -19,6 +20,7 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 4f209c5a9f3f4..f98fa6a271365 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -22,6 +23,7 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; +@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; From 0291f9464ebbcd4306932a459a6d99813ae72624 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 26 May 2019 11:18:39 -0400 Subject: [PATCH 280/321] Unmute FullClusterRestartIT#testClosedIndices Fixed in #39566 Closes #39576 --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 50eee32810adc..190523a3bc7c6 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -961,7 +961,6 @@ public void testSoftDeletes() throws Exception { * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version, * it verifies that the index exists and is replicated if the old version supports replication. 
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39576") public void testClosedIndices() throws Exception { if (isRunningAgainstOldCluster()) { createIndex(index, Settings.builder() From 70b4f67747c1de0c25ce737e0232cbb106072046 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 26 May 2019 16:03:42 -0400 Subject: [PATCH 281/321] Add debug log for retention leases (#42557) We need more information to understand why CcrRetentionLeaseIT is failing. This commit adds some debug log to retention leases and enables them in CcrRetentionLeaseIT. --- .../org/elasticsearch/index/seqno/ReplicationTracker.java | 7 ++++++- .../org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 892056674019f..cf0fe6a5d25e1 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -221,12 +221,15 @@ public synchronized Tuple getRetentionLeases(final boo .leases() .stream() .collect(Collectors.groupingBy(lease -> currentTimeMillis - lease.timestamp() > retentionLeaseMillis)); - if (partitionByExpiration.get(true) == null) { + final Collection expiredLeases = partitionByExpiration.get(true); + if (expiredLeases == null) { // early out as no retention leases have expired + logger.debug("no retention leases are expired from current retention leases [{}]", retentionLeases); return Tuple.tuple(false, retentionLeases); } final Collection nonExpiredLeases = partitionByExpiration.get(false) != null ? partitionByExpiration.get(false) : Collections.emptyList(); + logger.debug("expiring retention leases [{}] from current retention leases [{}]", expiredLeases, retentionLeases); retentionLeases = new RetentionLeases(operationPrimaryTerm, retentionLeases.version() + 1, nonExpiredLeases); return Tuple.tuple(true, retentionLeases); } @@ -255,6 +258,7 @@ public RetentionLease addRetentionLease( throw new RetentionLeaseAlreadyExistsException(id); } retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); + logger.debug("adding new retention lease [{}] to current retention leases [{}]", retentionLease, retentionLeases); retentionLeases = new RetentionLeases( operationPrimaryTerm, retentionLeases.version() + 1, @@ -312,6 +316,7 @@ public void removeRetentionLease(final String id, final ActionListener Date: Mon, 27 May 2019 09:21:30 +0200 Subject: [PATCH 282/321] Improve how internal representation of pipelines are updated (#42257) If a single pipeline is updated then the internal representation of all pipelines was updated. With this change, only the internal representation of the pipelines that have been modified will be updated. Prior to this change the IngestMetadata of the previous and current cluster was used to determine whether the internal representation of pipelines should be updated. If applying the previous cluster state change failed then subsequent cluster state changes that have no changes to IngestMetadata will not attempt to update the internal representation of the pipelines. 
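Condensed into a hedged sketch, the strategy described next looks roughly like this; the diff below is the authoritative version, compile() stands in for Pipeline.create(...), and error handling, metric carry-over, and the pass that drops deleted pipelines are elided:

// Rebuild only pipelines whose stored configuration differs from the incoming
// IngestMetadata; unchanged pipelines keep their already compiled instance.
void innerUpdatePipelines(IngestMetadata newIngestMetadata) {
    Map<String, PipelineHolder> existing = this.pipelines;
    Map<String, PipelineHolder> updated = null; // created lazily: no change is the common case
    for (PipelineConfiguration config : newIngestMetadata.getPipelines().values()) {
        PipelineHolder previous = existing.get(config.getId());
        if (previous != null && previous.configuration.equals(config)) {
            continue; // configuration unchanged, nothing to recompile
        }
        if (updated == null) {
            updated = new HashMap<>(existing);
        }
        updated.put(config.getId(), new PipelineHolder(config, compile(config)));
    }
    if (updated != null) {
        this.pipelines = Map.copyOf(updated); // publish the new map only when something changed
    }
}
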
This commit changes how the IngestService updates the internal representation: it keeps track of the underlying configuration and uses that to detect against the new IngestMetadata whether a pipeline configuration has changed; if so, the internal pipeline representation is updated. --- .../elasticsearch/ingest/IngestService.java | 219 +++++++++++------- .../ingest/IngestServiceTests.java | 70 +++++- 2 files changed, 203 insertions(+), 86 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index b2143d72ae65f..9e7d1b7b5bdbd 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -80,7 +80,7 @@ public class IngestService implements ClusterStateApplier { // We know of all the processor factories when a node with all its plugin have been initialized. Also some // processor factories rely on other node services. Custom metadata is statically registered when classes // are loaded, so in the cluster state we just save the pipeline config and here we keep the actual pipelines around. - private volatile Map pipelines = new HashMap<>(); + private volatile Map pipelines = Map.of(); private final ThreadPool threadPool; private final IngestMetric totalMetrics = new IngestMetric(); @@ -236,7 +236,12 @@ public ClusterState execute(ClusterState currentState) { * Returns the pipeline by the specified id */ public Pipeline getPipeline(String id) { - return pipelines.get(id); + PipelineHolder holder = pipelines.get(id); + if (holder != null) { + return holder.pipeline; + } else { + return null; + } } public Map getProcessorFactories() { @@ -252,52 +257,10 @@ public IngestInfo info() { return new IngestInfo(processorInfoList); } - Map pipelines() { + Map pipelines() { return pipelines; } - @Override - public void applyClusterState(final ClusterChangedEvent event) { - ClusterState state = event.state(); - Map originalPipelines = pipelines; - try { - innerUpdatePipelines(event.previousState(), state); - } catch (ElasticsearchParseException e) { - logger.warn("failed to update ingest pipelines", e); - } - //pipelines changed, so add the old metrics to the new metrics - if (originalPipelines != pipelines) { - pipelines.forEach((id, pipeline) -> { - Pipeline originalPipeline = originalPipelines.get(id); - if (originalPipeline != null) { - pipeline.getMetrics().add(originalPipeline.getMetrics()); - List> oldPerProcessMetrics = new ArrayList<>(); - List> newPerProcessMetrics = new ArrayList<>(); - getProcessorMetrics(originalPipeline.getCompoundProcessor(), oldPerProcessMetrics); - getProcessorMetrics(pipeline.getCompoundProcessor(), newPerProcessMetrics); - //Best attempt to populate new processor metrics using a parallel array of the old metrics. This is not ideal since - //the per processor metrics may get reset when the arrays don't match. However, to get to an ideal model, unique and - //consistent id's per processor and/or semantic equals for each processor will be needed. 
- if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) { - Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); - for (Tuple compositeMetric : newPerProcessMetrics) { - String type = compositeMetric.v1().getType(); - IngestMetric metric = compositeMetric.v2(); - if (oldMetricsIterator.hasNext()) { - Tuple oldCompositeMetric = oldMetricsIterator.next(); - String oldType = oldCompositeMetric.v1().getType(); - IngestMetric oldMetric = oldCompositeMetric.v2(); - if (type.equals(oldType)) { - metric.add(oldMetric); - } - } - } - } - } - }); - } - } - /** * Recursive method to obtain all of the non-failure processors for given compoundProcessor. Since conditionals are implemented as * wrappers to the actual processor, always prefer the actual processor's metric over the conditional processor's metric. @@ -324,25 +287,6 @@ private static List> getProcessorMetrics(Compound return processorMetrics; } - private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { - String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; - String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; - String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; - Processor failureProcessor = new AbstractProcessor(tag) { - @Override - public IngestDocument execute(IngestDocument ingestDocument) { - throw new IllegalStateException(errorMessage); - } - - @Override - public String getType() { - return type; - } - }; - String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; - return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); - } - static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); Map pipelines; @@ -403,10 +347,11 @@ protected void doRun() { String pipelineId = indexRequest.getPipeline(); if (NOOP_PIPELINE_NAME.equals(pipelineId) == false) { try { - Pipeline pipeline = pipelines.get(pipelineId); - if (pipeline == null) { + PipelineHolder holder = pipelines.get(pipelineId); + if (holder == null) { throw new IllegalArgumentException("pipeline with id [" + pipelineId + "] does not exist"); } + Pipeline pipeline = holder.pipeline; innerExecute(indexRequest, pipeline, itemDroppedHandler); //this shouldn't be needed here but we do it for consistency with index api // which requires it to prevent double execution @@ -424,7 +369,8 @@ protected void doRun() { public IngestStats stats() { IngestStats.Builder statsBuilder = new IngestStats.Builder(); statsBuilder.addTotalMetrics(totalMetrics); - pipelines.forEach((id, pipeline) -> { + pipelines.forEach((id, holder) -> { + Pipeline pipeline = holder.pipeline; CompoundProcessor rootProcessor = pipeline.getCompoundProcessor(); statsBuilder.addPipelineMetrics(id, pipeline.getMetrics()); List> processorMetrics = new ArrayList<>(); @@ -503,37 +449,146 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer } } - private void innerUpdatePipelines(ClusterState previousState, ClusterState state) { + @Override + public void applyClusterState(final ClusterChangedEvent event) { + ClusterState state = event.state(); if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { return; } - IngestMetadata ingestMetadata = 
state.getMetaData().custom(IngestMetadata.TYPE); - IngestMetadata previousIngestMetadata = previousState.getMetaData().custom(IngestMetadata.TYPE); - if (Objects.equals(ingestMetadata, previousIngestMetadata)) { + IngestMetadata newIngestMetadata = state.getMetaData().custom(IngestMetadata.TYPE); + if (newIngestMetadata == null) { return; } - Map pipelines = new HashMap<>(); - List exceptions = new ArrayList<>(); - for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { + try { + innerUpdatePipelines(newIngestMetadata); + } catch (ElasticsearchParseException e) { + logger.warn("failed to update ingest pipelines", e); + } + } + + void innerUpdatePipelines(IngestMetadata newIngestMetadata) { + Map existingPipelines = this.pipelines; + + // Lazy initialize these variables in order to favour the most likely scenario that there are no pipeline changes: + Map newPipelines = null; + List exceptions = null; + // Iterate over pipeline configurations in ingest metadata and construct a new pipeline if there is no pipeline + // or the pipeline configuration has been modified for (PipelineConfiguration newConfiguration : newIngestMetadata.getPipelines().values()) { + PipelineHolder previous = existingPipelines.get(newConfiguration.getId()); + if (previous != null && previous.configuration.equals(newConfiguration)) { + continue; + } + + if (newPipelines == null) { + newPipelines = new HashMap<>(existingPipelines); + } try { - pipelines.put( - pipeline.getId(), - Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories, scriptService) + Pipeline newPipeline = + Pipeline.create(newConfiguration.getId(), newConfiguration.getConfigAsMap(), processorFactories, scriptService); + newPipelines.put( + newConfiguration.getId(), + new PipelineHolder(newConfiguration, newPipeline) ); + + if (previous == null) { + continue; + } + Pipeline oldPipeline = previous.pipeline; + newPipeline.getMetrics().add(oldPipeline.getMetrics()); + List> oldPerProcessMetrics = new ArrayList<>(); + List> newPerProcessMetrics = new ArrayList<>(); + getProcessorMetrics(oldPipeline.getCompoundProcessor(), oldPerProcessMetrics); + getProcessorMetrics(newPipeline.getCompoundProcessor(), newPerProcessMetrics); + //Best attempt to populate new processor metrics using a parallel array of the old metrics. This is not ideal since + //the per processor metrics may get reset when the arrays don't match. However, to get to an ideal model, unique and + //consistent id's per processor and/or semantic equals for each processor will be needed. 
+ if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) { + Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); + for (Tuple compositeMetric : newPerProcessMetrics) { + String type = compositeMetric.v1().getType(); + IngestMetric metric = compositeMetric.v2(); + if (oldMetricsIterator.hasNext()) { + Tuple oldCompositeMetric = oldMetricsIterator.next(); + String oldType = oldCompositeMetric.v1().getType(); + IngestMetric oldMetric = oldCompositeMetric.v2(); + if (type.equals(oldType)) { + metric.add(oldMetric); + } + } + } + } } catch (ElasticsearchParseException e) { - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); + Pipeline pipeline = substitutePipeline(newConfiguration.getId(), e); + newPipelines.put(newConfiguration.getId(), new PipelineHolder(newConfiguration, pipeline)); + if (exceptions == null) { + exceptions = new ArrayList<>(); + } exceptions.add(e); } catch (Exception e) { ElasticsearchParseException parseException = new ElasticsearchParseException( - "Error updating pipeline with id [" + pipeline.getId() + "]", e); - pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); + "Error updating pipeline with id [" + newConfiguration.getId() + "]", e); + Pipeline pipeline = substitutePipeline(newConfiguration.getId(), parseException); + newPipelines.put(newConfiguration.getId(), new PipelineHolder(newConfiguration, pipeline)); + if (exceptions == null) { + exceptions = new ArrayList<>(); + } exceptions.add(parseException); } } - this.pipelines = Collections.unmodifiableMap(pipelines); - ExceptionsHelper.rethrowAndSuppress(exceptions); + + // Iterate over the current active pipelines and check whether they are missing in the pipeline configuration and + // if so delete the pipeline from new Pipelines map: + for (Map.Entry entry : existingPipelines.entrySet()) { + if (newIngestMetadata.getPipelines().get(entry.getKey()) == null) { + if (newPipelines == null) { + newPipelines = new HashMap<>(existingPipelines); + } + newPipelines.remove(entry.getKey()); + } + } + + if (newPipelines != null) { + // Update the pipelines: + this.pipelines = Map.copyOf(newPipelines); + + // Rethrow errors that may have occurred during creating new pipeline instances: + if (exceptions != null) { + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + } + } + + private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) { + String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; + String type = e.getHeaderKeys().contains("processor_type") ? 
e.getHeader("processor_type").get(0) : "unknown"; + String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; + Processor failureProcessor = new AbstractProcessor(tag) { + @Override + public IngestDocument execute(IngestDocument ingestDocument) { + throw new IllegalStateException(errorMessage); + } + + @Override + public String getType() { + return type; + } + }; + String description = "this is a place holder pipeline, because pipeline with id [" + id + "] could not be loaded"; + return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); + } + + static class PipelineHolder { + + final PipelineConfiguration configuration; + final Pipeline pipeline; + + PipelineHolder(PipelineConfiguration configuration, Pipeline pipeline) { + this.configuration = Objects.requireNonNull(configuration); + this.pipeline = Objects.requireNonNull(pipeline); + } } } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index e5aea1f5d5ce1..43e2a8a584979 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -153,10 +153,72 @@ public void testUpdatePipelines() { .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); assertThat(ingestService.pipelines().size(), is(1)); - assertThat(ingestService.pipelines().get("_id").getId(), equalTo("_id")); - assertThat(ingestService.pipelines().get("_id").getDescription(), nullValue()); - assertThat(ingestService.pipelines().get("_id").getProcessors().size(), equalTo(1)); - assertThat(ingestService.pipelines().get("_id").getProcessors().get(0).getType(), equalTo("set")); + assertThat(ingestService.pipelines().get("_id").pipeline.getId(), equalTo("_id")); + assertThat(ingestService.pipelines().get("_id").pipeline.getDescription(), nullValue()); + assertThat(ingestService.pipelines().get("_id").pipeline.getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id").pipeline.getProcessors().get(0).getType(), equalTo("set")); + } + + public void testInnerUpdatePipelines() { + IngestService ingestService = createWithProcessors(); + assertThat(ingestService.pipelines().size(), is(0)); + + PipelineConfiguration pipeline1 = new PipelineConfiguration("_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON); + IngestMetadata ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(1)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + + PipelineConfiguration pipeline2 = new PipelineConfiguration("_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON); + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id2", pipeline2)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(2)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id2").pipeline.getId(), equalTo("_id2")); + 
assertThat(ingestService.pipelines().get("_id2").pipeline.getProcessors().size(), equalTo(0)); + + PipelineConfiguration pipeline3 = new PipelineConfiguration("_id3", new BytesArray("{\"processors\": []}"), XContentType.JSON); + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id2", pipeline2, "_id3", pipeline3)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(3)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id2").pipeline.getId(), equalTo("_id2")); + assertThat(ingestService.pipelines().get("_id2").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getId(), equalTo("_id3")); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().size(), equalTo(0)); + + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id3", pipeline3)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(2)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getId(), equalTo("_id3")); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().size(), equalTo(0)); + + pipeline3 = new PipelineConfiguration( + "_id3",new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), XContentType.JSON + ); + ingestMetadata = new IngestMetadata(Map.of("_id1", pipeline1, "_id3", pipeline3)); + + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines().size(), is(2)); + assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); + assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getId(), equalTo("_id3")); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().size(), equalTo(1)); + assertThat(ingestService.pipelines().get("_id3").pipeline.getProcessors().get(0).getType(), equalTo("set")); + + // Perform an update with no changes: + Map pipelines = ingestService.pipelines(); + ingestService.innerUpdatePipelines(ingestMetadata); + assertThat(ingestService.pipelines(), sameInstance(pipelines)); } public void testDelete() { From cdb482eaae0517f202efdfaf445e8847917182f1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 11:31:42 +0200 Subject: [PATCH 283/321] Fix RareClusterStateIT (#42430) * It looks like we might be cancelling a previous publication instead of the one triggered by the given request with a very low likelihood. 
* Fixed by adding a wait for no in-progress publications * Also added debug logging that would've identified this problem * Closes #36813 --- .../cluster/coordination/Coordinator.java | 10 +++++++--- .../cluster/coordination/RareClusterStateIT.java | 4 ++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 1e7b38e50d1e9..376dd640c56b2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1165,9 +1165,13 @@ public Iterable getFoundPeers() { */ boolean cancelCommittedPublication() { synchronized (mutex) { - if (currentPublication.isPresent() && currentPublication.get().isCommitted()) { - currentPublication.get().cancel("cancelCommittedPublication"); - return true; + if (currentPublication.isPresent()) { + final CoordinatorPublication publication = currentPublication.get(); + if (publication.isCommitted()) { + publication.cancel("cancelCommittedPublication"); + logger.debug("Cancelled publication of [{}].", publication); + return true; + } } return false; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 27036680880b2..62491724b9221 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -141,6 +141,10 @@ public void onFailure(String source, Exception e) { private ActionFuture executeAndCancelCommittedPublication( ActionRequestBuilder req) throws Exception { + // Wait for no publication in progress to not accidentally cancel a publication different from the one triggered by the given + // request. + assertBusy( + () -> assertFalse(((Coordinator) internalCluster().getCurrentMasterNodeInstance(Discovery.class)).publicationInProgress())); ActionFuture future = req.execute(); assertBusy( () -> assertTrue(((Coordinator)internalCluster().getCurrentMasterNodeInstance(Discovery.class)).cancelCommittedPublication())); From 71e978bcc03b568afa945570315a37d346a2d364 Mon Sep 17 00:00:00 2001 From: bellengao Date: Mon, 27 May 2019 17:47:46 +0800 Subject: [PATCH 284/321] Update script-fields.asciidoc (#42490) --- docs/reference/search/request/script-fields.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index da5868ea7d65e..1bd61e0048182 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -33,7 +33,7 @@ GET /_search // CONSOLE // TEST[setup:sales] -Script fields can work on fields that are not stored (`my_field_name` in +Script fields can work on fields that are not stored (`price` in the above case), and allow to return custom values to be returned (the evaluated value of the script). 
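Because the corrected example reads price through doc values (doc['price'].value), the field does not have to be stored. As a hedged Java companion to the asciidoc snippet, the same request could be built like this; the index name "sales" matches the snippet's test setup, while the script-field name and the surrounding plumbing are assumptions:

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class ScriptFieldExample {

    static SearchRequest scriptFieldRequest() {
        SearchSourceBuilder source = new SearchSourceBuilder();
        // Reads price from doc values, so the mapping does not need the field stored.
        source.scriptField("doubled_price", new Script("doc['price'].value * 2"));
        return new SearchRequest("sales").source(source);
    }
}
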
From 50ed840e5275f89e868b554c387f6b2e76093370 Mon Sep 17 00:00:00 2001 From: Travis Steel Date: Mon, 27 May 2019 04:53:51 -0500 Subject: [PATCH 285/321] Fixed typo in docker.asciidoc (#42455) --- docs/reference/setup/install/docker.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 1fcc261d68e1f..9037a292168de 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -332,7 +332,7 @@ data through a bind-mount: As a last resort, you can also force the container to mutate the ownership of any bind-mounts used for the <> through the -environment variable `TAKE_FILE_OWNERSHIP`. Inn this case, they will be owned by +environment variable `TAKE_FILE_OWNERSHIP`. In this case, they will be owned by uid:gid `1000:0` providing read/write access to the {es} process as required. -- From c57b5750874da9050d2a773bd92169a91b8eb589 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Mon, 27 May 2019 12:57:01 +0200 Subject: [PATCH 286/321] Remove unused mapStringsOrdered method (#42513) Remove unused mapStringsOrdered method --- .../common/xcontent/XContentParser.java | 4 +--- .../common/xcontent/XContentSubParser.java | 5 ----- .../support/AbstractXContentParser.java | 17 +---------------- .../common/xcontent/XContentParserTests.java | 2 +- .../support/xcontent/WatcherXContentParser.java | 5 ----- 5 files changed, 3 insertions(+), 30 deletions(-) diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 6d4da08bfaa59..93321048d86c4 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -136,11 +136,9 @@ enum NumberType { Map mapStrings() throws IOException; - Map mapStringsOrdered() throws IOException; - /** * Returns an instance of {@link Map} holding parsed map. - * Serves as a replacement for the "map", "mapOrdered", "mapStrings" and "mapStringsOrdered" methods above. + * Serves as a replacement for the "map", "mapOrdered" and "mapStrings" methods above. 
* * @param mapFactory factory for creating new {@link Map} objects * @param mapValueParser parser for parsing a single map value diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index 252bfea7ca9c0..9a8686001e2dc 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -108,11 +108,6 @@ public Map mapStrings() throws IOException { return parser.mapStrings(); } - @Override - public Map mapStringsOrdered() throws IOException { - return parser.mapStringsOrdered(); - } - @Override public Map map( Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 68e03e34a1a17..043293b0cc319 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -276,11 +276,6 @@ public Map mapStrings() throws IOException { return readMapStrings(this); } - @Override - public Map mapStringsOrdered() throws IOException { - return readOrderedMapStrings(this); - } - @Override public Map map( Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { @@ -303,8 +298,6 @@ public List listOrderedMap() throws IOException { static final Supplier> SIMPLE_MAP_STRINGS_FACTORY = HashMap::new; - static final Supplier> ORDERED_MAP_STRINGS_FACTORY = LinkedHashMap::new; - static Map readMap(XContentParser parser) throws IOException { return readMap(parser, SIMPLE_MAP_FACTORY); } @@ -314,11 +307,7 @@ static Map readOrderedMap(XContentParser parser) throws IOExcept } static Map readMapStrings(XContentParser parser) throws IOException { - return readMapStrings(parser, SIMPLE_MAP_STRINGS_FACTORY); - } - - static Map readOrderedMapStrings(XContentParser parser) throws IOException { - return readMapStrings(parser, ORDERED_MAP_STRINGS_FACTORY); + return readGenericMap(parser, SIMPLE_MAP_STRINGS_FACTORY, XContentParser::text); } static List readList(XContentParser parser) throws IOException { @@ -333,10 +322,6 @@ static Map readMap(XContentParser parser, Supplier readValue(p, mapFactory)); } - static Map readMapStrings(XContentParser parser, Supplier> mapFactory) throws IOException { - return readGenericMap(parser, mapFactory, XContentParser::text); - } - static Map readGenericMap( XContentParser parser, Supplier> mapFactory, diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index c519880224ccb..31a00c4025ab2 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -189,7 +189,7 @@ private Map readMapStrings(String source) throws IOException { assertThat(parser.currentName(), equalTo("foo")); token = parser.nextToken(); assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); - return randomBoolean() ? 
parser.mapStringsOrdered() : parser.mapStrings(); + return parser.mapStrings(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java index 1d155a5f0c02d..20b0086c1e4e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -120,11 +120,6 @@ public Map mapStrings() throws IOException { return parser.mapStrings(); } - @Override - public Map mapStringsOrdered() throws IOException { - return parser.mapStringsOrdered(); - } - @Override public Map map( Supplier> mapFactory, CheckedFunction mapValueParser) throws IOException { From 074da02f441b0407b4da446ab877db6444ea1109 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 19:29:51 +0200 Subject: [PATCH 287/321] Dry up BlobStoreRepository#basePath Implementations (#42578) * This method is just a getter in every implementation => moved the field and concrete getter to the base class to simplify implementations --- .../repositories/url/URLRepository.java | 10 +----- .../repositories/azure/AzureRepository.java | 34 ++++++++----------- .../gcs/GoogleCloudStorageRepository.java | 25 ++++++-------- .../repositories/hdfs/HdfsRepository.java | 8 +---- .../repositories/s3/S3Repository.java | 25 ++++++-------- .../blobstore/BlobStoreRepository.java | 10 ++++-- .../repositories/fs/FsRepository.java | 10 +----- 7 files changed, 47 insertions(+), 75 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 0ea2a1b72c574..a27b091cfc037 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -75,8 +75,6 @@ public class URLRepository extends BlobStoreRepository { private final Environment environment; - private final BlobPath basePath; - private final URL url; /** @@ -84,7 +82,7 @@ public class URLRepository extends BlobStoreRepository { */ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); @@ -92,7 +90,6 @@ public URLRepository(RepositoryMetaData metadata, Environment environment, this.environment = environment; supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(environment.settings()); urlWhiteList = ALLOWED_URLS_SETTING.get(environment.settings()).toArray(new URIPattern[]{}); - basePath = BlobPath.cleanPath(); url = URL_SETTING.exists(metadata.settings()) ? 
URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(environment.settings()); } @@ -115,11 +112,6 @@ protected BlobStore getBlobStore() { return super.getBlobStore(); } - @Override - protected BlobPath basePath() { - return basePath; - } - /** * Makes sure that the url is white listed or if it points to the local file system it matches one on of the root path in path.repo */ diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 7c3520918fc58..403ef10d3444d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -79,17 +79,27 @@ public static final class Repository { public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope); } - private final BlobPath basePath; private final ByteSizeValue chunkSize; private final AzureStorageService storageService; private final boolean readonly; public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, buildBasePath(metadata)); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; + // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. + // For secondary_only setting, the repository should be read only + final LocationMode locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); + if (Repository.READONLY_SETTING.exists(metadata.settings())) { + this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + } else { + this.readonly = locationMode == LocationMode.SECONDARY_ONLY; + } + } + + private static BlobPath buildBasePath(RepositoryMetaData metadata) { final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/'); if (Strings.hasLength(basePath)) { // Remove starting / if any @@ -97,18 +107,9 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, Nam for(final String elem : basePath.split("/")) { path = path.add(elem); } - this.basePath = path; - } else { - this.basePath = BlobPath.cleanPath(); - } - - // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
- // For secondary_only setting, the repository should be read only - final LocationMode locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - if (Repository.READONLY_SETTING.exists(metadata.settings())) { - this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); + return path; } else { - this.readonly = locationMode == LocationMode.SECONDARY_ONLY; + return BlobPath.cleanPath(); } } @@ -123,15 +124,10 @@ protected AzureBlobStore createBlobStore() { logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - blobStore, chunkSize, isCompress(), basePath)); + blobStore, chunkSize, isCompress(), basePath())); return blobStore; } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 804fafd5e855e..6382a537c4682 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -57,7 +57,6 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); private final GoogleCloudStorageService storageService; - private final BlobPath basePath; private final ByteSizeValue chunkSize; private final String bucket; private final String clientName; @@ -65,24 +64,27 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, buildBasePath(metadata)); this.storageService = storageService; + this.chunkSize = getSetting(CHUNK_SIZE, metadata); + this.bucket = getSetting(BUCKET, metadata); + this.clientName = CLIENT_NAME.get(metadata.settings()); + logger.debug( + "using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath(), chunkSize, isCompress()); + } + + private static BlobPath buildBasePath(RepositoryMetaData metadata) { String basePath = BASE_PATH.get(metadata.settings()); if (Strings.hasLength(basePath)) { BlobPath path = new BlobPath(); for (String elem : basePath.split("/")) { path = path.add(elem); } - this.basePath = path; + return path; } else { - this.basePath = BlobPath.cleanPath(); + return BlobPath.cleanPath(); } - - this.chunkSize = getSetting(CHUNK_SIZE, metadata); - this.bucket = getSetting(BUCKET, metadata); - this.clientName = CLIENT_NAME.get(metadata.settings()); - logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, isCompress()); } @Override @@ -90,11 +92,6 @@ protected GoogleCloudStorageBlobStore createBlobStore() { return new GoogleCloudStorageBlobStore(bucket, clientName, storageService); } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git 
a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index b614753d83883..b51f843162a74 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -59,7 +59,6 @@ public final class HdfsRepository extends BlobStoreRepository { private final Environment environment; private final ByteSizeValue chunkSize; - private final BlobPath basePath = BlobPath.cleanPath(); private final URI uri; private final String pathSetting; @@ -69,7 +68,7 @@ public final class HdfsRepository extends BlobStoreRepository { public HdfsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); @@ -233,11 +232,6 @@ protected HdfsBlobStore createBlobStore() { return blobStore; } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index a91e7bf663f92..c099d5a2e064c 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -150,8 +150,6 @@ class S3Repository extends BlobStoreRepository { private final ByteSizeValue chunkSize; - private final BlobPath basePath; - private final boolean serverSideEncryption; private final String storageClass; @@ -165,7 +163,7 @@ class S3Repository extends BlobStoreRepository { final Settings settings, final NamedXContentRegistry namedXContentRegistry, final S3Service service, final ThreadPool threadPool) { - super(metadata, settings, namedXContentRegistry, threadPool); + super(metadata, settings, namedXContentRegistry, threadPool, buildBasePath(metadata)); this.service = service; // Parse and validate the user's S3 Storage Class setting @@ -183,13 +181,6 @@ class S3Repository extends BlobStoreRepository { ") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ")."); } - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); @@ -211,6 +202,15 @@ class S3Repository extends BlobStoreRepository { storageClass); } + private static BlobPath buildBasePath(RepositoryMetaData metadata) { + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + return new BlobPath().add(basePath); + } else { + return BlobPath.cleanPath(); + } + } + @Override protected S3BlobStore createBlobStore() { return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, 
metadata); @@ -228,11 +228,6 @@ protected BlobStore getBlobStore() { return super.getBlobStore(); } - @Override - protected BlobPath basePath() { - return basePath; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 49b551b26b796..1cb50f0f1a0da 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -197,6 +197,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce blobStore = new SetOnce<>(); + private final BlobPath basePath; + /** * Constructs new BlobStoreRepository * @param metadata The metadata for this repository including name and settings @@ -204,7 +206,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * @param threadPool Threadpool to run long running repository manipulations on asynchronously */ protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, - ThreadPool threadPool) { + ThreadPool threadPool, BlobPath basePath) { this.settings = settings; this.metadata = metadata; this.threadPool = threadPool; @@ -212,6 +214,7 @@ protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, Na snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); readOnly = metadata.settings().getAsBoolean("readonly", false); + this.basePath = basePath; indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, compress); @@ -317,8 +320,11 @@ protected BlobStore blobStore() { /** * Returns base path of the repository + * Public for testing. */ - protected abstract BlobPath basePath(); + public BlobPath basePath() { + return basePath; + } /** * Returns true if metadata and snapshot files should be compressed diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 8f495f2d4842a..f338e0ee4cb08 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -66,14 +66,12 @@ public class FsRepository extends BlobStoreRepository { private final ByteSizeValue chunkSize; - private final BlobPath basePath; - /** * Constructs a shared file system repository. 
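The hunk just above shows the heart of this patch: the base class now owns a final basePath field and one concrete getter, and every implementation hands its path up through the constructor. Stripped of the Elasticsearch specifics, the pull-up pattern looks like the sketch below; the class and method names are simplified stand-ins, not the real repository types.

    // Base class owns the field once, replacing N identical overrides.
    abstract class BaseRepository {
        private final String basePath;

        protected BaseRepository(String basePath) {
            this.basePath = basePath;
        }

        // Formerly "protected abstract String basePath()", overridden in every subclass.
        public String basePath() {
            return basePath;
        }
    }

    class FsLikeRepository extends BaseRepository {
        FsLikeRepository() {
            super("/"); // constant path: passed straight through, as FsRepository does
        }
    }

    class AzureLikeRepository extends BaseRepository {
        AzureLikeRepository(String configured) {
            // Derivation must happen before super(), since the field is final;
            // hence the static buildBasePath(...) helpers introduced in this patch.
            super(buildBasePath(configured));
        }

        private static String buildBasePath(String configured) {
            return configured.isEmpty() ? "/" : configured;
        }
    }

That constructor constraint is exactly why AzureRepository, GoogleCloudStorageRepository and S3Repository each gained a static buildBasePath(RepositoryMetaData) method in this change: the super() argument has to be computable without access to instance state.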
*/ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ThreadPool threadPool) { - super(metadata, environment.settings(), namedXContentRegistry, threadPool); + super(metadata, environment.settings(), namedXContentRegistry, threadPool, BlobPath.cleanPath()); this.environment = environment; String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { @@ -101,7 +99,6 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, NamedX } else { this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); } - this.basePath = BlobPath.cleanPath(); } @Override @@ -115,9 +112,4 @@ protected BlobStore createBlobStore() throws Exception { protected ByteSizeValue chunkSize() { return chunkSize; } - - @Override - protected BlobPath basePath() { - return basePath; - } } From 349d2ce153d4be78bf0d8ae2472bfc88e18530c1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 22:40:23 +0200 Subject: [PATCH 288/321] Add Infrastructure to Run 3rd Party Repository Tests (#42586) * Add Infrastructure to Run 3rd Party Repository Tests * Add infrastructure to run third party repository tests using our standard JUnit infrastructure * This is a prerequisite of #42189 --- plugins/repository-azure/build.gradle | 21 +++++ .../AzureStorageCleanupThirdPartyTests.java | 65 +++++++++++++ plugins/repository-gcs/build.gradle | 21 +++++ .../GoogleCloudStorageThirdPartyTests.java | 64 +++++++++++++ plugins/repository-s3/build.gradle | 43 ++++++++- .../s3/S3RepositoryThirdPartyTests.java | 73 +++++++++++++++ .../AbstractThirdPartyRepositoryTestCase.java | 91 +++++++++++++++++++ 7 files changed, 373 insertions(+), 5 deletions(-) create mode 100644 plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java create mode 100644 plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a7c1af412d949..2669e4bf6092a 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -71,3 +71,24 @@ testClusters { keystore 'azure.client.integration_test.key', 'azure_key' } } + +String azureAccount = System.getenv("azure_storage_account") +String azureKey = System.getenv("azure_storage_key") +String azureContainer = System.getenv("azure_storage_container") +String azureBasePath = System.getenv("azure_storage_base_path") + +test { + exclude '**/AzureStorageCleanupThirdPartyTests.class' +} + +task thirdPartyTest(type: Test) { + include '**/AzureStorageCleanupThirdPartyTests.class' + systemProperty 'test.azure.account', azureAccount ? azureAccount : "" + systemProperty 'test.azure.key', azureKey ? azureKey : "" + systemProperty 'test.azure.container', azureContainer ? azureContainer : "" + systemProperty 'test.azure.base', azureBasePath ? 
azureBasePath : "" +} + +if (azureAccount || azureKey || azureContainer || azureBasePath) { + check.dependsOn(thirdPartyTest) +} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java new file mode 100644 index 0000000000000..596fdf73342eb --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(AzureRepositoryPlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.azure.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.key"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.container"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.base"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.default.account", System.getProperty("test.azure.account")); + secureSettings.setString("azure.client.default.key", System.getProperty("test.azure.key")); + return secureSettings; + } + + @Override + protected void createRepository(String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) + .setType("azure") + .setSettings(Settings.builder() + .put("container", System.getProperty("test.azure.container")) + .put("base_path", System.getProperty("test.azure.base")) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e5af9081ca189..288ab3c99f17b 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,3 +1,5 @@ +import java.nio.file.Files + /* * 
Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -122,3 +124,22 @@ check { // also execute the QA tests when testing the plugin dependsOn 'qa:google-cloud-storage:check' } + +String gcsServiceAccount = System.getenv("google_storage_service_account") +String gcsBucket = System.getenv("google_storage_bucket") +String gcsBasePath = System.getenv("google_storage_base_path") + +test { + exclude '**/GoogleCloudStorageThirdPartyTests.class' +} + +task thirdPartyTest(type: Test) { + include '**/GoogleCloudStorageThirdPartyTests.class' + systemProperty 'test.google.account', gcsServiceAccount ? Base64.encoder.encodeToString(Files.readAllBytes(file(gcsServiceAccount).toPath())) : "" + systemProperty 'test.google.bucket', gcsBucket ? gcsBucket : "" + systemProperty 'test.google.base', gcsBasePath ? gcsBasePath : "/" +} + +if (gcsServiceAccount || gcsBucket || gcsBasePath) { + check.dependsOn(thirdPartyTest) +} \ No newline at end of file diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java new file mode 100644 index 0000000000000..06eb63ddd22f0 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
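The Gradle block above moves the whole GCS service-account file into the test JVM by Base64-encoding its bytes into a flat system property, and the test class that follows decodes it back before registering it as a secure setting. A self-contained sketch of that round trip follows; the file name and property key are hypothetical placeholders.

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Base64;

    public final class CredentialRoundTrip {
        public static void main(String[] args) throws Exception {
            // Build-script side: system properties carry only flat strings,
            // so the raw file bytes are made transport-safe with Base64.
            byte[] raw = Files.readAllBytes(Paths.get("service-account.json")); // hypothetical path
            System.setProperty("test.google.account", Base64.getEncoder().encodeToString(raw));

            // Test side: recover the exact original bytes before handing them
            // to the secure-settings store.
            byte[] decoded = Base64.getDecoder().decode(System.getProperty("test.google.account"));
            System.out.println("recovered " + decoded.length + " bytes");
        }
    }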
+ */ + +package org.elasticsearch.repositories.gcs; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Base64; +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class GoogleCloudStorageThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(GoogleCloudStoragePlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.google.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.google.bucket"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setFile("gcs.client.default.credentials_file", + Base64.getDecoder().decode(System.getProperty("test.google.account"))); + return secureSettings; + } + + @Override + protected void createRepository(final String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("gcs") + .setSettings(Settings.builder() + .put("bucket", System.getProperty("test.google.bucket")) + .put("base_path", System.getProperty("test.google.base", "/")) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 60a4e97cfa493..bf32b99f9dc38 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -75,6 +75,7 @@ test { // these are tested explicitly in separate test tasks exclude '**/*CredentialsTests.class' exclude '**/S3BlobStoreRepositoryTests.class' + exclude '**/S3RepositoryThirdPartyTests.class' } boolean useFixture = false @@ -134,6 +135,14 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } +task thirdPartyTest(type: Test) { + include '**/S3RepositoryThirdPartyTests.class' + systemProperty 'test.s3.account', s3PermanentAccessKey + systemProperty 'test.s3.key', s3PermanentSecretKey + systemProperty 'test.s3.bucket', s3PermanentBucket + systemProperty 'test.s3.base', s3PermanentBasePath +} + if (useFixture) { apply plugin: 'elasticsearch.test.fixtures' task writeDockerFile { @@ -151,6 +160,32 @@ if (useFixture) { dependsOn(writeDockerFile) } + def minioAddress = { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort + } + + File minioAddressFile = new File(project.buildDir, 'generated-resources/s3Fixture.address') + + // We can't lazy evaluate a system property for the Minio address passed to JUnit so we write it to a resource file + // and pass its name instead. 
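The comment above describes the build-side half of the workaround; the test-side half is a plain classpath read, which the S3 test further below performs with StreamsUtils.copyToStringFromClasspath. The sketch here reproduces that resolution step with JDK-only I/O as a stand-in for the test framework helper; the property key and resource name are illustrative.

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    final class FixtureEndpoint {
        private FixtureEndpoint() {}

        /** Reads the endpoint the build wrote to a classpath resource, or returns null when no fixture is configured. */
        static String resolve(String propertyKey) throws IOException {
            final String resourceName = System.getProperty(propertyKey); // e.g. "s3Fixture.address"
            if (resourceName == null) {
                return null; // no fixture: the test talks to the real service
            }
            try (InputStream in = FixtureEndpoint.class.getResourceAsStream("/" + resourceName)) {
                if (in == null) {
                    throw new IOException("fixture resource missing from classpath: " + resourceName);
                }
                return new String(in.readAllBytes(), StandardCharsets.UTF_8);
            }
        }
    }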
+ task writeMinioAddress { + dependsOn tasks.bundlePlugin, tasks.postProcessFixture + outputs.file(minioAddressFile) + doLast { + file(minioAddressFile).text = "${ -> minioAddress.call() }" + } + } + + thirdPartyTest { + dependsOn writeMinioAddress + inputs.file(minioAddressFile) + systemProperty 'test.s3.endpoint', minioAddressFile.name + } + + BuildPlugin.requireDocker(tasks.thirdPartyTest) + task integTestMinio(type: RestIntegTestTask) { description = "Runs REST tests using the Minio repository." dependsOn tasks.bundlePlugin, tasks.postProcessFixture @@ -169,11 +204,7 @@ if (useFixture) { testClusters.integTestMinio { keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - setting 's3.client.integration_test_permanent.endpoint', { - int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" - assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort - } + setting 's3.client.integration_test_permanent.endpoint', minioAddress plugin file(tasks.bundlePlugin.archiveFile) } @@ -191,6 +222,8 @@ if (useFixture) { } } +check.dependsOn(thirdPartyTest) + File parentFixtures = new File(project.buildDir, "fixtures") File s3FixtureFile = new File(parentFixtures, 's3Fixture.properties') diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java new file mode 100644 index 0000000000000..88e293575488f --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
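The S3 test that follows treats the endpoint as strictly optional: when the Minio fixture wrote an address, the repository is pointed at it, and when it did not, the setting is simply absent and the client resolves the real AWS endpoint. A sketch of that optional-override idiom, with a plain map standing in for the Settings builder and hypothetical defaults:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class S3RepoSettings {
        static Map<String, String> build() {
            final Map<String, String> settings = new LinkedHashMap<>();
            settings.put("bucket", System.getProperty("test.s3.bucket", "example-bucket")); // hypothetical default
            settings.put("base_path", System.getProperty("test.s3.base", "/"));

            // Present only when a local fixture is wired in by the build;
            // leaving it out lets the client fall back to the real endpoint.
            final String endpoint = System.getProperty("test.s3.endpoint");
            if (endpoint != null) {
                settings.put("endpoint", endpoint);
            }
            return settings;
        }
    }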
+ */ +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.elasticsearch.test.StreamsUtils; + +import java.io.IOException; +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(S3RepositoryPlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.s3.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.s3.key"), not(blankOrNullString())); + assertThat(System.getProperty("test.s3.bucket"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", System.getProperty("test.s3.account")); + secureSettings.setString("s3.client.default.secret_key", System.getProperty("test.s3.key")); + return secureSettings; + } + + @Override + protected void createRepository(String repoName) { + Settings.Builder settings = Settings.builder() + .put("bucket", System.getProperty("test.s3.bucket")) + .put("base_path", System.getProperty("test.s3.base", "/")); + final String endpointPath = System.getProperty("test.s3.endpoint"); + if (endpointPath != null) { + try { + settings = settings.put("endpoint", StreamsUtils.copyToStringFromClasspath("/" + endpointPath)); + } catch (IOException e) { + throw new AssertionError(e); + } + } + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("s3") + .setSettings(settings).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java new file mode 100644 index 0000000000000..90c399a5af6c7 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
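The abstract base class that follows is what ties the three vendor tests together: it owns the whole snapshot smoke-test flow and asks implementations for exactly two things, credentials and repository registration. Reduced to a skeleton, that is the template-method pattern; the sketch below uses simplified signatures rather than the real ESSingleNodeTestCase machinery.

    import java.util.Map;

    // Shared harness: owns the test flow, delegates vendor specifics.
    abstract class ThirdPartyRepositoryHarness {

        // Hook 1: vendor credentials, injected into node settings by the base class.
        protected abstract Map<String, String> credentials();

        // Hook 2: vendor-specific repository registration.
        protected abstract void createRepository(String repoName);

        // Template method: the same create/verify/delete snapshot flow for every vendor.
        public final void runCreateSnapshotTest() {
            if (credentials().isEmpty()) {
                throw new IllegalStateException("third-party credentials not configured");
            }
            createRepository("test-repo");
            // ... index documents, snapshot "test-repo", assert SUCCESS, delete the snapshot ...
        }
    }

Each vendor test supplies only those two hooks, so adding a fourth storage backend means one small subclass rather than a copied test.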
+ */ +package org.elasticsearch.repositories; + +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeTestCase { + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .setSecureSettings(credentials()) + .build(); + } + + protected abstract SecureSettings credentials(); + + protected abstract void createRepository(String repoName); + + + public void testCreateSnapshot() { + createRepository("test-repo"); + + createIndex("test-idx-1"); + createIndex("test-idx-2"); + createIndex("test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test-idx-1", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-2", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-3", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = "test-snap-" + System.currentTimeMillis(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", snapshotName) + .setWaitForCompletion(true) + .setIndices("test-idx-*", "-test-idx-3") + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client().admin() + .cluster() + .prepareGetSnapshots("test-repo") + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), + equalTo(SnapshotState.SUCCESS)); + + assertTrue(client().admin() + .cluster() + .prepareDeleteSnapshot("test-repo", snapshotName) + .get() + .isAcknowledged()); + + } +} From 337cef14b31628dd1880e68183645ee8d87d664e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 15:57:59 -0400 Subject: [PATCH 289/321] Add test ensure we can execute update requests in mixed cluster Relates #42596 --- .../org/elasticsearch/upgrades/RecoveryIT.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 32ddc77113bc8..49bd5bb3585b6 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -567,4 +567,22 @@ private void ensureGlobalCheckpointSynced(String index) throws Exception { }); }, 60, TimeUnit.SECONDS); } + + /** Ensure that we can always execute update requests regardless of the version of cluster */ + public void testUpdateDoc() throws Exception { + final String index = "test_update_doc"; + if (CLUSTER_TYPE == ClusterType.OLD) { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + 
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + createIndex(index, settings.build()); + } + ensureGreen(index); + indexDocs(index, 0, 10); + for (int i = 0; i < 10; i++) { + Request update = new Request("POST", index + "/_update/" + i); + update.setJsonEntity("{\"doc\": {\"f\": " + randomNonNegativeLong() + "}}"); + client().performRequest(update); + } + } } From 643eb35a20771ebf3d1c79e5500e49e2ad658d82 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 17:30:21 -0400 Subject: [PATCH 290/321] Allocate to data-only nodes in ReopenWhileClosingIT (#42560) If all primary shards are allocated on the master node, then the verifying before close step will never interact with mock transport service. This change prefers to allocate shards on data-only nodes. Closes #39757 --- .../indices/state/ReopenWhileClosingIT.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index 0f57f518e70cd..8cf3b76184ae4 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Glob; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -50,7 +51,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 2) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class ReopenWhileClosingIT extends ESIntegTestCase { @Override @@ -64,8 +65,9 @@ protected int minimumNumberOfShards() { } public void testReopenDuringClose() throws Exception { + List dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3)); final String indexName = "test"; - createIndexWithDocs(indexName); + createIndexWithDocs(indexName, dataOnlyNodes); ensureYellowAndNoInitializingShards(indexName); @@ -84,12 +86,12 @@ public void testReopenDuringClose() throws Exception { assertIndexIsOpened(indexName); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39757") public void testReopenDuringCloseOnMultipleIndices() throws Exception { + List dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3)); final List indices = new ArrayList<>(); for (int i = 0; i < randomIntBetween(2, 10); i++) { indices.add("index-" + i); - createIndexWithDocs(indices.get(i)); + createIndexWithDocs(indices.get(i), dataOnlyNodes); } ensureYellowAndNoInitializingShards(indices.toArray(Strings.EMPTY_ARRAY)); @@ -117,8 +119,9 @@ public void testReopenDuringCloseOnMultipleIndices() throws Exception { }); } - private void createIndexWithDocs(final String indexName) { - createIndex(indexName); + private void createIndexWithDocs(final String indexName, final Collection dataOnlyNodes) { + createIndex(indexName, + Settings.builder().put(indexSettings()).put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes)).build()); final int nbDocs = scaledRandomIntBetween(1, 100); for (int i = 0; i < nbDocs; i++) { index(indexName, "_doc", String.valueOf(i), 
"num", i); From 7cf9eb2476fce3166b34424f122014167b9a1845 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 21:44:36 -0400 Subject: [PATCH 291/321] Reset mock transport service in CcrRetentionLeaseIT (#42600) testRetentionLeaseIsAddedIfItDisappearsWhileFollowing does not reset the mock transport service after test. Surviving transport interceptors from that test can sneaky remove retention leases and make other tests fail. Closes #39331 Closes #39509 Closes #41428 Closes #41679 Closes #41737 Closes #41756 --- .../xpack/ccr/CcrRetentionLeaseIT.java | 87 ++++++++++--------- 1 file changed, 46 insertions(+), 41 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index eb4f4be84233b..9b3eaa7de55bb 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ccr; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,7 +43,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -88,7 +86,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; -@TestLogging(value = "org.elasticsearch.xpack.ccr:trace,org.elasticsearch.indices.recovery:trace,org.elasticsearch.index.seqno:debug") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -224,9 +221,9 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { // block the recovery from completing; this ensures the background sync is still running final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (ClearCcrRestoreSessionAction.NAME.equals(action) @@ -248,9 +245,9 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); latch.countDown(); } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) 
getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -405,9 +402,9 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) @@ -456,9 +453,9 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -488,9 +485,9 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) @@ -526,9 +523,9 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID)))); } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -766,35 +763,36 @@ public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Excep final CountDownLatch latch = new CountDownLatch(1); final ClusterStateResponse followerClusterState = 
followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { - final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); - senderTransportService.addSendBehavior( + try { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) - || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { senderTransportService.clearAllRules(); final RetentionLeaseActions.RenewRequest renewRequest = (RetentionLeaseActions.RenewRequest) request; final String primaryShardNodeId = - getLeaderCluster() - .clusterService() - .state() - .routingTable() - .index(leaderIndex) - .shard(renewRequest.getShardId().id()) - .primaryShard() - .currentNodeId(); + getLeaderCluster() + .clusterService() + .state() + .routingTable() + .index(leaderIndex) + .shard(renewRequest.getShardId().id()) + .primaryShard() + .currentNodeId(); final String primaryShardNodeName = - getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); + getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); final IndexShard primary = - getLeaderCluster() - .getInstance(IndicesService.class, primaryShardNodeName) - .getShardOrNull(renewRequest.getShardId()); + getLeaderCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(renewRequest.getShardId()); final CountDownLatch innerLatch = new CountDownLatch(1); // this forces the background renewal from following to face a retention lease not found exception primary.removeRetentionLease( - getRetentionLeaseId(followerIndex, leaderIndex), - ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); + getRetentionLeaseId(followerIndex, leaderIndex), + ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); try { innerLatch.await(); @@ -807,11 +805,18 @@ public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Excep } connection.sendRequest(requestId, action, request, options); }); - } + } - latch.await(); + latch.await(); - assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } } /** @@ -858,9 +863,9 @@ public void testPeriodicRenewalDoesNotAddRetentionLeaseAfterUnfollow() throws Ex final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode 
senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) @@ -914,9 +919,9 @@ public void onResponseReceived( assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } From 7209f9769092d6caee82822f1e23195e14a0ae46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20Kaymak?= Date: Tue, 28 May 2019 10:37:04 +0300 Subject: [PATCH 292/321] Fixed ignoring name parameter for percolator queries (#42598) Closes #40405 --- .../percolator/PercolateQueryBuilder.java | 21 ++++++++++++--- .../PercolateQueryBuilderTests.java | 26 +++++++++++++++++++ 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 3021f5b31606e..151dd8b9e61ba 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -178,7 +178,7 @@ public PercolateQueryBuilder(String field, String documentType, List documentSupplier) { + protected PercolateQueryBuilder(String field, String documentType, Supplier documentSupplier) { if (field == null) { throw new IllegalArgumentException("[field] is a required argument"); } @@ -491,8 +491,12 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { if (source == null) { return this; // not executed yet } else { - return new PercolateQueryBuilder(field, documentType, Collections.singletonList(source), - XContentHelper.xContentType(source)); + PercolateQueryBuilder rewritten = new PercolateQueryBuilder(field, documentType, + Collections.singletonList(source), XContentHelper.xContentType(source)); + if (name != null) { + rewritten.setName(name); + } + return rewritten; } } GetRequest getRequest; @@ -527,7 +531,12 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { listener.onResponse(null); }, listener::onFailure)); }); - return new PercolateQueryBuilder(field, documentType, documentSupplier::get); + + PercolateQueryBuilder rewritten = new PercolateQueryBuilder(field, documentType, documentSupplier::get); + if (name != null) { + rewritten.setName(name); + } + return rewritten; } @Override @@ -626,6 +635,10 @@ XContentType getXContentType() { return documentXContentType; } + public String getQueryName() { + return name; + } + static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, Collection docs) { RAMDirectory ramDirectory = new 
RAMDirectory(); try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(analyzer))) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 6053a92b54a20..5b4dc61090042 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -54,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Supplier; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; @@ -331,4 +332,29 @@ public void testFieldAlias() throws IOException { assertEquals(query.getVerifiedMatchesQuery(), aliasQuery.getVerifiedMatchesQuery()); } + public void testSettingNameWhileRewriting() { + String testName = "name1"; + QueryShardContext shardContext = createShardContext(); + PercolateQueryBuilder percolateQueryBuilder = doCreateTestQueryBuilder(true); + percolateQueryBuilder.setName(testName); + + QueryBuilder rewrittenQueryBuilder = percolateQueryBuilder.doRewrite(shardContext); + + assertEquals(testName, ((PercolateQueryBuilder) rewrittenQueryBuilder).getQueryName()); + assertNotEquals(rewrittenQueryBuilder, percolateQueryBuilder); + } + + public void testSettingNameWhileRewritingWhenDocumentSupplierAndSourceNotNull() { + Supplier supplier = () -> new BytesArray("{\"test\": \"test\"}"); + String testName = "name1"; + QueryShardContext shardContext = createShardContext(); + PercolateQueryBuilder percolateQueryBuilder = new PercolateQueryBuilder(queryField, null, supplier); + percolateQueryBuilder.setName(testName); + + QueryBuilder rewrittenQueryBuilder = percolateQueryBuilder.doRewrite(shardContext); + + assertEquals(testName, ((PercolateQueryBuilder) rewrittenQueryBuilder).getQueryName()); + assertNotEquals(rewrittenQueryBuilder, percolateQueryBuilder); + } + } From 578c05e771509586d426b0ad1ff8af5f1f3bf351 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 09:15:03 +0100 Subject: [PATCH 293/321] [Ml Data Frame] Return bad_request on preview when config is invalid (#42447) --- ...nsportPreviewDataFrameTransformAction.java | 24 +++++++---- .../test/data_frame/preview_transforms.yml | 43 +++++++++++++++++++ 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index f4b93cc6ac412..dde9edb37e55c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; +import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.util.List; @@ -102,14 +103,21 @@ 
private void getPreview(Pivot pivot, SourceConfig source, ActionListener { - final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); - DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); - // remove all internal fields - List> results = pivot.extractResults(agg, deducedMappings, stats) - .peek(record -> { - record.keySet().removeIf(k -> k.startsWith("_")); - }).collect(Collectors.toList()); - listener.onResponse(results); + + try { + final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); + DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); + // remove all internal fields + List> results = pivot.extractResults(agg, deducedMappings, stats) + .peek(record -> { + record.keySet().removeIf(k -> k.startsWith("_")); + }).collect(Collectors.toList()); + + listener.onResponse(results); + } catch (AggregationResultUtils.AggregationExtractionException extractionException) { + listener.onFailure( + new ElasticsearchStatusException(extractionException.getMessage(), RestStatus.BAD_REQUEST)); + } }, listener::onFailure )); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 5e58048b3bf0f..090243d1d966b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -127,3 +127,46 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } + +--- +"Test preview returns bad request with invalid agg": + - skip: + reason: date histo interval is deprecated + features: "warnings" + + - do: + warnings: + - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." + catch: bad_request + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "pivot": { + "group_by": { + "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } + + - do: + warnings: + - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." 
+ catch: /mixed object types of nested and non-nested fields \[time.min\]/ + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "pivot": { + "group_by": { + "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } + From 777be0908d7bb96d5231cea8639c0fff92f9c3a3 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 28 May 2019 10:20:02 +0200 Subject: [PATCH 294/321] Mute AsyncTwoPhaseIndexerTests#testStateMachine() (#42609) Relates #42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 4249d7c61d0ad..95b3de5eb333e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -225,6 +225,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 0de11779a7aadbf46b777fdcbc4bd541c3208ea7 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 09:58:35 +0100 Subject: [PATCH 295/321] [ML DataFrame] Use date histogram fixed_interval syntax and remove test skip --- .../test/data_frame/preview_transforms.yml | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 090243d1d966b..7b5c4e8cb5664 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -130,13 +130,7 @@ setup: --- "Test preview returns bad request with invalid agg": - - skip: - reason: date histo interval is deprecated - features: "warnings" - - do: - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." catch: bad_request data_frame.preview_data_frame_transform: body: > @@ -144,7 +138,7 @@ setup: "source": { "index": "airline-data" }, "pivot": { "group_by": { - "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "time": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, "aggs": { "avg_response": {"avg": {"field": "responsetime"}}, "time.min": {"min": {"field": "time"}} @@ -153,8 +147,6 @@ setup: } - do: - warnings: - - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." 
catch: /mixed object types of nested and non-nested fields \[time.min\]/ data_frame.preview_data_frame_transform: body: > @@ -162,7 +154,7 @@ setup: "source": { "index": "airline-data" }, "pivot": { "group_by": { - "time": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "time": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, "aggs": { "avg_response": {"avg": {"field": "responsetime"}}, "time.min": {"min": {"field": "time"}} From 5f651f4ab88493be7959c4042ba65d33def71c89 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 28 May 2019 12:20:20 +0200 Subject: [PATCH 296/321] Mute NodeTests (#42614) Relates #42577 --- server/src/test/java/org/elasticsearch/node/NodeTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 6f0419421b868..a5653eb88e176 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -50,6 +50,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42577") @LuceneTestCase.SuppressFileSystems(value = "ExtrasFS") public class NodeTests extends ESTestCase { From e97bee606845c276b48e87f38fd8449c5f230ef4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 12:38:19 +0200 Subject: [PATCH 297/321] Fix Incorrect Time Math in MockTransport (#42595) * Fix Incorrect Time Math in MockTransport * The timeunit here must be nanos for the current time (we even convert it accordingly in the logging) * Also, changed the log message when dumping stack traces a little to make it easier to grep for (otherwise it's the same as the message on unregister) --- .../org/elasticsearch/transport/nio/MockNioTransport.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 9470b7548adfb..86c7f77fa7bee 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -363,11 +363,11 @@ private void maybeLogElapsedTime(long startTime) { private void logLongRunningExecutions() { for (Map.Entry entry : registry.entrySet()) { - final long elapsedTime = threadPool.relativeTimeInMillis() - entry.getValue(); - if (elapsedTime > WARN_THRESHOLD) { + final long elapsedTimeInNanos = threadPool.relativeTimeInNanos() - entry.getValue(); + if (elapsedTimeInNanos > WARN_THRESHOLD) { final Thread thread = entry.getKey(); - logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), - TimeUnit.NANOSECONDS.toMillis(elapsedTime), + logger.warn("Potentially blocked execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos), Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); } } From 2c8440922f99e1ffc95487d2d3130168898d448b Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 28 May 2019 12:25:51 +0100 Subject: [PATCH 298/321] Remove 
PRE_60_NODE_CHECKPOINT (#42527) This commit removes the obsolete `PRE_60_NODE_CHECKPOINT` constant for dealing with 5.x nodes' lack of sequence number support. Backported as #42531 --- .../index/seqno/ReplicationTracker.java | 37 +++++-------------- .../index/seqno/SequenceNumbers.java | 4 -- 2 files changed, 10 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index cf0fe6a5d25e1..c272816ed3815 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -539,9 +539,7 @@ private boolean invariant() { "checkpoints map should always have an entry for the current shard"; // local checkpoints only set during primary mode - assert primaryMode || checkpoints.values().stream() - .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO || - lcps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT); + assert primaryMode || checkpoints.values().stream().allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // global checkpoints for other shards only set during primary mode assert primaryMode @@ -550,9 +548,7 @@ private boolean invariant() { .stream() .filter(e -> e.getKey().equals(shardAllocationId) == false) .map(Map.Entry::getValue) - .allMatch(cps -> - (cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO - || cps.globalCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT)); + .allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; @@ -631,7 +627,7 @@ private static long inSyncCheckpointStates( .stream() .filter(cps -> cps.inSync) .mapToLong(function) - .filter(v -> v != SequenceNumbers.PRE_60_NODE_CHECKPOINT && v != SequenceNumbers.UNASSIGNED_SEQ_NO)); + .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO)); return value.isPresent() ? 
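                // With the pre-6.0 marker (-3) gone, UNASSIGNED_SEQ_NO is the only sentinel
                // that can still appear in this stream, so filtering it out is all that is
                // needed before reducing over the in-sync local checkpoints.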
value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } @@ -916,13 +912,9 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin } private boolean updateLocalCheckpoint(String allocationId, CheckpointState cps, long localCheckpoint) { - // a local checkpoint of PRE_60_NODE_CHECKPOINT cannot be overridden - assert cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT || - localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT : - "pre-6.0 shard copy " + allocationId + " unexpected to send valid local checkpoint " + localCheckpoint; - // a local checkpoint for a shard copy should be a valid sequence number or the pre-6.0 sequence number indicator - assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO : - "invalid local checkpoint for shard copy [" + allocationId + "]"; + // a local checkpoint for a shard copy should be a valid sequence number + assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED : + "invalid local checkpoint [" + localCheckpoint + "] for shard copy [" + allocationId + "]"; if (localCheckpoint > cps.localCheckpoint) { logger.trace("updated local checkpoint of [{}] from [{}] to [{}]", allocationId, cps.localCheckpoint, localCheckpoint); cps.localCheckpoint = localCheckpoint; @@ -981,8 +973,6 @@ private static long computeGlobalCheckpoint(final Set pendingInSync, fin if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; - } else if (cps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - // 5.x replica, ignore for global checkpoint calculation } else { minLocalCheckpoint = Math.min(cps.localCheckpoint, minLocalCheckpoint); } @@ -1054,18 +1044,11 @@ public synchronized void completeRelocationHandoff() { handoffInProgress = false; relocated = true; // forget all checkpoint information except for global checkpoint of current shard - checkpoints.entrySet().stream().forEach(e -> { - final CheckpointState cps = e.getValue(); - if (cps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } - if (e.getKey().equals(shardAllocationId) == false) { + checkpoints.forEach((key, cps) -> { + cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + if (key.equals(shardAllocationId) == false) { // don't throw global checkpoint information of current shard away - if (cps.globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.globalCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } }); assert invariant(); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 6336e83338f8c..87257a97076da 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -28,10 +28,6 @@ public class SequenceNumbers { public static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint"; public static final String MAX_SEQ_NO = "max_seq_no"; - /** - * Represents a checkpoint coming from a pre-6.0 node - */ - public static final long PRE_60_NODE_CHECKPOINT = -3L; /** * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). 
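     * Its value is {@code -2}; together with {@code NO_OPS_PERFORMED} ({@code -1}) it is now
     * the only special negative sequence-number value, the pre-6.0 {@code -3} marker being gone.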
*/ From 692245cc447d748b8233d588bb38cc261227d6f5 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 May 2019 13:35:01 +0200 Subject: [PATCH 299/321] Reset state recovery after successful recovery (#42576) The problem this commit addresses is that state recovery is not reset on a node that then becomes master with a cluster state that has a state not recovered flag in it. The situation that was observed in a failed test run of MinimumMasterNodesIT.testThreeNodesNoMasterBlock (see below) is that we have 3 master nodes (node_t0, node_t1, node_t2), two of them are shut down (node_t2 remains), when the first one comes back (renamed to node_t4) it becomes leader in term 2 and sends state (with state_not_recovered_block) to node_t2, which accepts. node_t2 becomes leader in term 3, and as it was previously leader in term1 and successfully completed state recovery, does never retry state recovery in term 3. Closes #39172 --- .../elasticsearch/cluster/ClusterState.java | 3 +- .../cluster/coordination/Coordinator.java | 1 - .../elasticsearch/gateway/GatewayService.java | 11 ++-- .../coordination/CoordinatorTests.java | 61 ++++++++++++++++++- 4 files changed, 67 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 6a5e2a324965f..6cde7d5b3bb10 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -297,7 +297,8 @@ public RoutingNodes getRoutingNodes() { public String toString() { StringBuilder sb = new StringBuilder(); final String TAB = " "; - sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); + sb.append("cluster uuid: ").append(metaData.clusterUUID()) + .append(" [committed: ").append(metaData.clusterUUIDCommitted()).append("]").append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 376dd640c56b2..6d86bb613be43 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -699,7 +699,6 @@ public void invariant() { assert followersChecker.getFastResponseState().term == getCurrentTerm() : followersChecker.getFastResponseState(); assert followersChecker.getFastResponseState().mode == getMode() : followersChecker.getFastResponseState(); assert (applierState.nodes().getMasterNodeId() == null) == applierState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); - assert applierState.nodes().getMasterNodeId() == null || applierState.metaData().clusterUUIDCommitted(); assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse(); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index b7b7d0759980e..3e9c25847f6a7 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -85,7 +85,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final Runnable 
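    // Renamed from "recovered": the flag now means "a recovery attempt is in flight" rather
    // than "recovery happened once". It is also reset after a successful recovery (see
    // clusterStateProcessed below), so a node that later becomes leader again on a
    // not-recovered cluster state will run state recovery a second time.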
recoveryRunnable; - private final AtomicBoolean recovered = new AtomicBoolean(); + private final AtomicBoolean recoveryInProgress = new AtomicBoolean(); private final AtomicBoolean scheduledRecovery = new AtomicBoolean(); @Inject @@ -211,7 +211,7 @@ public void onFailure(Exception e) { @Override protected void doRun() { - if (recovered.compareAndSet(false, true)) { + if (recoveryInProgress.compareAndSet(false, true)) { logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime); recoveryRunnable.run(); } @@ -219,7 +219,7 @@ protected void doRun() { }, recoverAfterTime, ThreadPool.Names.GENERIC); } } else { - if (recovered.compareAndSet(false, true)) { + if (recoveryInProgress.compareAndSet(false, true)) { threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(final Exception e) { @@ -237,7 +237,7 @@ protected void doRun() { } private void resetRecoveredFlags() { - recovered.set(false); + recoveryInProgress.set(false); scheduledRecovery.set(false); } @@ -256,6 +256,9 @@ public ClusterState execute(final ClusterState currentState) { @Override public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size()); + // reset flag even though state recovery completed, to ensure that if we subsequently become leader again based on a + // not-recovered state, that we again do another state recovery. + resetRecoveredFlags(); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 5daa863402b2a..4f1937efc9c74 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -69,6 +70,8 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.ClusterStateUpdaters; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MockGatewayMetaState; import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; @@ -130,6 +133,7 @@ import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES; import static org.elasticsearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; @@ -190,6 +194,45 @@ public void testRepeatableTests() throws Exception { assertEquals(result1, result2); } + /** + 
* This test was added to verify that state recovery is properly reset on a node after it has become master and successfully + * recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows: + * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back + * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. + * Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously + * successfully completed state recovery, is never reset to a state where state recovery can be retried. + */ + public void testStateRecoveryResetAfterPreviousLeadership() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader = cluster.getAnyLeader(); + final ClusterNode follower1 = cluster.getAnyNodeExcept(leader); + final ClusterNode follower2 = cluster.getAnyNodeExcept(leader, follower1); + + // restart follower1 and follower2 + for (ClusterNode clusterNode : Arrays.asList(follower1, follower2)) { + clusterNode.close(); + cluster.clusterNodes.forEach( + cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode( + new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? cn.restartedNode() : cn); + } + + cluster.stabilise(); + } + public void testCanUpdateClusterStateAfterStabilisation() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runRandomly(); @@ -1524,6 +1567,10 @@ void stabilise(long stabilisationDurationMillis) { assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet()); assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId)); + assertThat(leaderId + " has no NO_MASTER_BLOCK", + leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(leaderId + " has no STATE_NOT_RECOVERED_BLOCK", + leader.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false)); assertThat(leaderId + " has applied its state ", leader.getLastAppliedClusterState().getVersion(), isEqualToLeaderVersion); for (final ClusterNode clusterNode : clusterNodes) { @@ -1555,6 +1602,8 @@ void stabilise(long stabilisationDurationMillis) { equalTo(leader.getLocalNode())); assertThat(nodeId + " has no NO_MASTER_BLOCK", clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(nodeId + " has no STATE_NOT_RECOVERED_BLOCK", + clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false)); } else { assertThat(nodeId + " is not following " + leaderId, clusterNode.coordinator.getMode(), is(CANDIDATE)); assertThat(nodeId + " has no master", clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), nullValue()); @@ -1724,7 +1773,8 @@ class MockPersistedState implements PersistedState { } else { nodeEnvironment = null; delegate = new InMemoryPersistedState(0L, - clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, 
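                    // Both the fresh-start path here and the restart path below now wrap the
                    // initial state in ClusterStateUpdaters.addStateNotRecoveredBlock(...), so
                    // every node starts with STATE_NOT_RECOVERED_BLOCK and the stabilise()
                    // assertions can verify that state recovery actually ran.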
VotingConfiguration.EMPTY_CONFIG, 0L)); + ClusterStateUpdaters.addStateNotRecoveredBlock( + clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L))); } } catch (IOException e) { throw new UncheckedIOException("Unable to create MockPersistedState", e); @@ -1764,8 +1814,9 @@ class MockPersistedState implements PersistedState { clusterState.writeTo(outStream); StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(), new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + // adapt cluster state to new localNode instance and add blocks delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(oldState.getCurrentTerm()), - ClusterState.readFrom(inStream, newLocalNode)); // adapts it to new localNode instance + ClusterStateUpdaters.addStateNotRecoveredBlock(ClusterState.readFrom(inStream, newLocalNode))); } } catch (IOException e) { throw new UncheckedIOException("Unable to create MockPersistedState", e); @@ -1869,15 +1920,19 @@ protected Optional getDisruptableMockTransport(Transpo transportService)); final Collection> onJoinValidators = Collections.singletonList((dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs))); + final AllocationService allocationService = ESAllocationTestCase.createAllocationService(Settings.EMPTY); coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(Settings.EMPTY), masterService, this::getPersistedState, + allocationService, masterService, this::getPersistedState, Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get()); masterService.setClusterStatePublisher(coordinator); + final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, + deterministicTaskQueue.getThreadPool(this::onNode), null, coordinator); logger.trace("starting up [{}]", localNode); transportService.start(); transportService.acceptIncomingRequests(); coordinator.start(); + gatewayService.start(); clusterService.start(); coordinator.startInitialJoin(); } From b57cbb67e5490495761ec0929c347db92d6aa65c Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:47:18 -0400 Subject: [PATCH 300/321] [DOCS] Escape cross-ref link comma for Asciidoctor (#42402) --- docs/reference/rollup/rollup-api.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/rollup/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc index 099686fb4329d..5981336d0a054 100644 --- a/docs/reference/rollup/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -9,7 +9,7 @@ * <>, <>, * <>, <>, -* <> +* <> * <> [float] From 69ef51d1418915148a313f13a4dad652a7dbc922 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:52:59 -0400 Subject: [PATCH 301/321] [DOCS] Fix API Quick Reference rollup attribute for Asciidoctor (#42403) --- docs/reference/rollup/api-quickref.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc index 21eefefb4b12b..d1ea03b6284d7 100644 --- a/docs/reference/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -5,7 +5,7 @@ experimental[] -Most {rollup} endpoints have the following base: +Most rollup endpoints have the following base: [source,js] ---- From f2cde97a3b3d865ac6efba01f5b32a9fc4e8fb40 Mon 
Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 28 May 2019 10:03:39 -0400 Subject: [PATCH 302/321] [ML] adding delayed_data_check_config to datafeed update docs (#42095) * [ML] adding delayed_data_check_config to datafeed update docs * [DOCS] Edits delayed data configuration details --- .../ml/apis/datafeedresource.asciidoc | 30 ++++++++++--------- docs/reference/ml/apis/put-datafeed.asciidoc | 9 +++--- .../ml/apis/update-datafeed.asciidoc | 9 +++++- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 33fce3dbf7c9d..5c1e3e74a6ae8 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -61,12 +61,12 @@ A {dfeed} resource has the following properties: `delayed_data_check_config`:: (object) Specifies whether the data feed checks for missing data and - and the size of the window. For example: + the size of the window. For example: `{"enabled": true, "check_window": "1h"}` See <>. [[ml-datafeed-chunking-config]] -==== Chunking Configuration Objects +==== Chunking configuration objects {dfeeds-cap} might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load @@ -88,31 +88,33 @@ A chunking configuration object has the following properties: For example: `3h`. [[ml-datafeed-delayed-data-check-config]] -==== Delayed Data Check Configuration Objects +==== Delayed data check configuration objects The {dfeed} can optionally search over indices that have already been read in -an effort to find if any data has since been added to the index. If missing data -is found, it is a good indication that the `query_delay` option is set too low and -the data is being indexed after the {dfeed} has passed that moment in time. See +an effort to determine whether any data has subsequently been added to the index. +If missing data is found, it is a good indication that the `query_delay` option +is set too low and the data is being indexed after the {dfeed} has passed that +moment in time. See {stack-ov}/ml-delayed-data-detection.html[Working with delayed data]. -This check only runs on real-time {dfeeds} +This check runs only on real-time {dfeeds}. The configuration object has the following properties: `enabled`:: - (boolean) Should the {dfeed} periodically check for data being indexed after reading. - Defaults to `true` + (boolean) Specifies whether the {dfeed} periodically checks for delayed data. + Defaults to `true`. `check_window`:: - (time units) The window of time before the latest finalized bucket that should be searched - for late data. Defaults to `null` which causes an appropriate `check_window` to be calculated - when the real-time {dfeed} runs. - The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. + (time units) The window of time that is searched for late data. This window of + time ends with the latest finalized bucket. It defaults to `null`, which + causes an appropriate `check_window` to be calculated when the real-time + {dfeed} runs. In particular, the default `check_window` span calculation is + based on the maximum of `2h` or `8 * bucket_span`. [float] [[ml-datafeed-counts]] -==== {dfeed-cap} Counts +==== {dfeed-cap} counts The get {dfeed} statistics API provides information about the operational progress of a {dfeed}. 
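NOTE: For the delayed data check described above, the default `check_window` is the
larger of two hours and eight bucket spans. A minimal sketch of that calculation
(illustrative only; `defaultCheckWindow` is an assumed helper name, not the actual
{ml} implementation):

[source,java]
----
import org.elasticsearch.common.unit.TimeValue;

// Default check_window: max(2h, 8 * bucket_span).
static TimeValue defaultCheckWindow(TimeValue bucketSpan) {
    TimeValue twoHours = TimeValue.timeValueHours(2);
    long eightSpansMillis = 8 * bucketSpan.millis();
    return twoHours.millis() >= eightSpansMillis
        ? twoHours
        : TimeValue.timeValueMillis(eightSpansMillis);
}
----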
All of these properties are informational; you cannot diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 52728dd093da5..2e0f6700191cd 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -45,6 +45,11 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df (object) Specifies how data searches are split into time chunks. See <>. +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See + <>. + `frequency`:: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short @@ -82,10 +87,6 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. -`delayed_data_check_config`:: - (object) Specifies if and with how large a window should the data feed check - for missing data. See <>. - For more information about these properties, see <>. diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index a370c1acef9d7..63878913c7f1a 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -14,7 +14,10 @@ Updates certain properties of a {dfeed}. `POST _ml/datafeeds//_update` -//===== Description +===== Description + +NOTE: If you update the `delayed_data_check_config` property, you must stop and +start the {dfeed} for the change to be applied. ==== Path Parameters @@ -32,6 +35,10 @@ The following properties can be updated after the {dfeed} is created: `chunking_config`:: (object) Specifies how data searches are split into time chunks. See <>. + +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See <>. `frequency`:: (time units) The interval at which scheduled queries are made while the From 1ef00e368b4c25bacd59a2598c60b510044da163 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 28 May 2019 15:23:55 +0100 Subject: [PATCH 303/321] Avoid loading retention leases while writing them (#42620) Resolves #41430. --- .../org/elasticsearch/index/seqno/ReplicationTracker.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index c272816ed3815..1a67eb55e0576 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -346,7 +346,10 @@ public synchronized void updateRetentionLeasesOnReplica(final RetentionLeases re * @throws IOException if an I/O exception occurs reading the retention leases */ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { - final RetentionLeases retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + final RetentionLeases retentionLeases; + synchronized (retentionLeasePersistenceLock) { + retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + } // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. 
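        // Reading under retentionLeasePersistenceLock mirrors the write side: a load can no
        // longer observe a state file that another thread is halfway through rewriting,
        // which is the failure mode behind #41430.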
assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; From ae783879626ace085d30c3debd0a247de8535301 Mon Sep 17 00:00:00 2001 From: Vigya Sharma Date: Tue, 28 May 2019 20:12:46 +0530 Subject: [PATCH 304/321] Validate routing commands using updated routing state (#42066) When multiple commands are called in sequence, fetch shards from mutable, up-to-date routing nodes to ensure each command's changes are visible to subsequent commands. This addresses an issue uncovered during work on #41050. --- ...AllocateEmptyPrimaryAllocationCommand.java | 13 +++- .../AllocateReplicaAllocationCommand.java | 26 +++++-- ...AllocateStalePrimaryAllocationCommand.java | 13 +++- .../allocation/AllocationCommandsTests.java | 72 +++++++++++++++++++ 4 files changed, 112 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index 4d037570dd266..2e3219e67c7ae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -110,13 +110,20 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting shardRouting; try { - shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (shardRouting.unassigned() == false) { + + ShardRouting shardRouting = null; + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + shardRouting = shard; + break; + } + } + if (shardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java index 709681f2b2008..5e1bcd81bb5fa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RerouteExplanation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -35,6 +34,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -101,20 +101,34 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean 
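            // As with the empty-primary command above, the replica command now resolves its
            // target shards from allocation.routingNodes() -- the mutable view that already
            // reflects commands executed earlier in the same reroute request -- instead of
            // the immutable routingTable snapshot taken before any command ran.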
explain) return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting primaryShardRouting; try { - primaryShardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (primaryShardRouting.unassigned()) { + + ShardRouting primaryShardRouting = null; + for (RoutingNode node : allocation.routingNodes()) { + for (ShardRouting shard : node) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + primaryShardRouting = shard; + break; + } + } + } + if (primaryShardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "trying to allocate a replica shard [" + index + "][" + shardId + "], while corresponding primary shard is still unassigned"); } - List replicaShardRoutings = - allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); + List replicaShardRoutings = new ArrayList<>(); + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary() == false) { + replicaShardRoutings.add(shard); + } + } + ShardRouting shardRouting; if (replicaShardRoutings.isEmpty()) { return explainOrThrowRejectedCommand(explain, allocation, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java index f4c9aba17d71e..7e645c2cfcb6f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java @@ -108,13 +108,20 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting shardRouting; try { - shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (shardRouting.unassigned() == false) { + + ShardRouting shardRouting = null; + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + shardRouting = shard; + break; + } + } + if (shardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index c966e3cac27dc..1405be54fd51e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -677,4 +677,76 @@ public void testMoveShardFromNonDataNode() { assertEquals("[move_allocation] 
can't move [test][0] from " + node2 + " to " + node1 + ": source [" + node2.getName() + "] is not a data node.", e.getMessage()); } + + public void testConflictingCommandsInSingleRequest() { + AllocationService allocation = createAllocationService(Settings.builder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + .build()); + + final String index1 = "test1"; + final String index2 = "test2"; + final String index3 = "test3"; + logger.info("--> building initial routing table"); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(index1).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .put(IndexMetaData.builder(index2).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .put(IndexMetaData.builder(index3).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .addAsRecovery(metaData.index(index1)) + .addAsRecovery(metaData.index(index2)) + .addAsRecovery(metaData.index(index3)) + .build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); + + final String node1 = "node1"; + final String node2 = "node2"; + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode(node1)) + .add(newNode(node2)) + ).build(); + final ClusterState finalClusterState = allocation.reroute(clusterState, "reroute"); + + logger.info("--> allocating same index primary in multiple commands should fail"); + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(finalClusterState, + new AllocationCommands( + new AllocateStalePrimaryAllocationCommand(index1, 0, node1, true), + new AllocateStalePrimaryAllocationCommand(index1, 0, node2, true) + ), false, false); + }).getMessage(), containsString("primary [" + index1 + "][0] is already assigned")); + + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(finalClusterState, + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand(index2, 0, node1, true), + new AllocateEmptyPrimaryAllocationCommand(index2, 0, node2, true) + ), false, false); + }).getMessage(), containsString("primary [" + index2 + "][0] is already assigned")); + + + clusterState = allocation.reroute(clusterState, + new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(index3, 0, node1, true)), false, false).getClusterState(); + clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + + final ClusterState updatedClusterState = clusterState; + assertThat(updatedClusterState.getRoutingNodes().node(node1).shardsWithState(STARTED).size(), equalTo(1)); + + logger.info("--> subsequent replica allocation fails as all configured replicas have been allocated"); + assertThat(expectThrows(IllegalArgumentException.class, () 
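                // The second AllocateReplicaAllocationCommand must fail: the first command in
                // the same request assigns the only configured replica of index3, and that
                // assignment is visible to the second command because both operate on the
                // shared mutable routing nodes.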
-> { + allocation.reroute(updatedClusterState, + new AllocationCommands( + new AllocateReplicaAllocationCommand(index3, 0, node2), + new AllocateReplicaAllocationCommand(index3, 0, node2) + ), false, false); + }).getMessage(), containsString("all copies of [" + index3 + "][0] are already assigned. Use the move allocation command instead")); + } } From 4a08b3d1c94cc5821841b6682ccc9606c4095112 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 28 May 2019 07:51:04 -0700 Subject: [PATCH 305/321] remove 6.4.x version constants (#42127) relates refactoring initiative #41164. --- .../action/PainlessExecuteAction.java | 18 ++------ .../main/java/org/elasticsearch/Version.java | 16 ------- .../index/mapper/TextFieldMapper.java | 15 +++---- .../search/slice/SliceBuilder.java | 8 +--- .../indices/close/CloseIndexRequestTests.java | 8 ++-- .../coordination/JoinTaskExecutorTests.java | 25 +++++------ .../index/mapper/NestedObjectMapperTests.java | 42 +++++++++++++++++++ .../index/mapper/TextFieldMapperTests.java | 19 ++------- .../query/SpanMultiTermQueryBuilderTests.java | 24 ++++------- .../elasticsearch/license/LicenseUtils.java | 12 +----- .../core/ml/action/GetJobsStatsAction.java | 9 +--- .../core/ml/job/config/DetectionRule.java | 3 -- .../xpack/core/ml/job/config/Detector.java | 6 +-- .../xpack/core/ml/job/config/MlFilter.java | 13 ++---- .../ml/job/results/CategoryDefinition.java | 9 +--- .../ml/action/TransportOpenJobAction.java | 9 ---- .../action/TransportOpenJobActionTests.java | 30 ------------- .../action/PutJobStateMachineTests.java | 5 ++- 18 files changed, 90 insertions(+), 181 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index cb407978da83e..7c8a368d38a58 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -313,25 +312,16 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); script = new Script(in); - if (in.getVersion().before(Version.V_6_4_0)) { - byte scriptContextId = in.readByte(); - assert scriptContextId == 0; - } else { - context = fromScriptContextName(in.readString()); - contextSetup = in.readOptionalWriteable(ContextSetup::new); - } + context = fromScriptContextName(in.readString()); + contextSetup = in.readOptionalWriteable(ContextSetup::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); script.writeTo(out); - if (out.getVersion().before(Version.V_6_4_0)) { - out.writeByte((byte) 0); - } else { - out.writeString(context.name); - out.writeOptionalWriteable(contextSetup); - } + out.writeString(context.name); + out.writeOptionalWriteable(contextSetup); } // For testing only: diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c685d39c7562f..48d37957844e2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java 
+++ b/server/src/main/java/org/elasticsearch/Version.java @@ -46,14 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_4_0_ID = 6040099; - public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_1_ID = 6040199; - public static final Version V_6_4_1 = new Version(V_6_4_1_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_2_ID = 6040299; - public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); - public static final int V_6_4_3_ID = 6040399; - public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_5_1_ID = 6050199; @@ -136,14 +128,6 @@ public static Version fromId(int id) { return V_6_5_1; case V_6_5_0_ID: return V_6_5_0; - case V_6_4_3_ID: - return V_6_4_3; - case V_6_4_2_ID: - return V_6_4_2; - case V_6_4_1_ID: - return V_6_4_1; - case V_6_4_0_ID: - return V_6_4_0; case V_EMPTY_ID: return V_EMPTY; default: diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 8a3203ad8e7e0..6906ceb113b9c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -54,7 +54,6 @@ import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.settings.Settings; @@ -193,15 +192,11 @@ public TextFieldMapper build(BuilderContext context) { } // Copy the index options of the main field to allow phrase queries on // the prefix field. 
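            // Prefix queries are always rewritten to constant-score, so the prefix subfield
            // never needs frequencies: DOCS suffices unless the parent field also indexes
            // positions or offsets, which phrase-prefix queries rely on.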
- if (context.indexCreatedVersion().onOrAfter(Version.V_6_4_0)) { - if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS) { - // frequencies are not needed because prefix queries always use a constant score - prefixFieldType.setIndexOptions(IndexOptions.DOCS); - } else { - prefixFieldType.setIndexOptions(fieldType.indexOptions()); - } - } else if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) { - prefixFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS) { + // frequencies are not needed because prefix queries always use a constant score + prefixFieldType.setIndexOptions(IndexOptions.DOCS); + } else { + prefixFieldType.setIndexOptions(fieldType.indexOptions()); } if (fieldType.storeTermVectorOffsets()) { prefixFieldType.setStoreTermVectorOffsets(true); diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 3c86b21a0873d..01924e938dcd0 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -217,19 +217,15 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, int shardId = request.shardId().id(); int numShards = context.getIndexSettings().getNumberOfShards(); - if (minNodeVersion.onOrAfter(Version.V_6_4_0) && - (request.preference() != null || request.indexRoutings().length > 0)) { + if (request.preference() != null || request.indexRoutings().length > 0) { GroupShardsIterator group = buildShardIterator(clusterService, request); assert group.size() <= numShards : "index routing shards: " + group.size() + " cannot be greater than total number of shards: " + numShards; if (group.size() < numShards) { - /** + /* * The routing of this request targets a subset of the shards of this index so we need to we retrieve * the original {@link GroupShardsIterator} and compute the request shard id and number of * shards from it. - * This behavior has been added in {@link Version#V_6_4_0} so if there is another node in the cluster - * with an older version we use the original shard id and number of shards in order to ensure that all - * slices use the same numbers. 
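             * Since 7.x is wire-compatible only with 6.8 and later, every node understands the
             * recomputed shard id and count, so the old V_6_4_0 guard is no longer needed.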
*/ numShards = group.size(); int ord = 0; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java index 985b4304a32f4..df940012bf24d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; - public class CloseIndexRequestTests extends ESTestCase { public void testSerialization() throws Exception { @@ -54,7 +52,8 @@ public void testBwcSerialization() throws Exception { { final CloseIndexRequest request = randomRequest(); try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + out.setVersion(VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_7_2_0))); request.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { @@ -77,7 +76,8 @@ public void testBwcSerialization() throws Exception { final CloseIndexRequest deserializedRequest = new CloseIndexRequest(); try (StreamInput in = out.bytes().streamInput()) { - in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0))); + in.setVersion(VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_7_2_0))); deserializedRequest.readFrom(in); } assertEquals(sample.getParentTask(), deserializedRequest.getParentTask()); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index e20559ca00561..f2bb3bd3cc03b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -31,7 +31,6 @@ import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; public class JoinTaskExecutorTests extends ESTestCase { @@ -69,27 +68,25 @@ public void testPreventJoinClusterWithUnsupportedIndices() { public void testPreventJoinClusterWithUnsupportedNodeVersions() { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - final Version version = randomVersion(random()); + final Version version = randomCompatibleVersion(random(), Version.CURRENT); builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), version)); builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), randomCompatibleVersion(random(), version))); DiscoveryNodes nodes = builder.build(); final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(Version.V_7_0_0)) { - final Version tooLow = 
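        // One step below the newest node's wire-compatibility floor, so the join must be
        // rejected no matter which compatible version the randomisation above picked.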
getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); - } - }); - } - Version oldMajor = Version.V_6_4_0.minimumCompatibilityVersion(); - expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); + final Version tooLow = getPreviousVersion(maxNodeVersion.minimumCompatibilityVersion()); + expectThrows(IllegalStateException.class, () -> { + if (randomBoolean()) { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + } else { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + } + }); + Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); + expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); final Version minGoodVersion = maxNodeVersion.major == minNodeVersion.major ? // we have to stick with the same major diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index edca517830833..5a2fe0233ef05 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -32,6 +33,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.UncheckedIOException; @@ -695,4 +697,44 @@ protected boolean forbidPrivateIndexSettings() { */ return false; } + + public void testReorderParent() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("nested1").field("type", "nested").endObject() + .endObject().endObject().endObject()); + + DocumentMapper docMapper = createIndex("test", + Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), + VersionUtils.randomIndexCompatibleVersion(random())).build()) + .mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + + assertThat(docMapper.hasNestedObjects(), equalTo(true)); + ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); + assertThat(nested1Mapper.nested().isNested(), equalTo(true)); + + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "value") + .startArray("nested1") + .startObject() + .field("field1", "1") + .field("field2", "2") + .endObject() + .startObject() + .field("field1", "3") + .field("field2", "4") + .endObject() + .endArray() + .endObject()), + XContentType.JSON)); + + assertThat(doc.docs().size(), equalTo(3)); + assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), 
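            // Block-join layout: the two nested child documents precede their parent, and the
            // parent (carrying the top-level "field") is the last document of the block.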
equalTo(nested1Mapper.nestedTypePathAsString())); + assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1")); + assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2")); + assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3")); + assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4")); + assertThat(doc.docs().get(2).get("field"), equalTo("value")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 449d17a5b9bda..b2a8d40156f6b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -670,11 +669,7 @@ public void testIndexPrefixIndexTypes() throws IOException { FieldMapper prefix = (FieldMapper) mapper.mappers().getMapper("field._index_prefix"); FieldType ft = prefix.fieldType; - if (indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); - } else { - assertEquals(IndexOptions.DOCS, ft.indexOptions()); - } + assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); assertFalse(ft.storeTermVectors()); } @@ -691,11 +686,7 @@ public void testIndexPrefixIndexTypes() throws IOException { FieldMapper prefix = (FieldMapper) mapper.mappers().getMapper("field._index_prefix"); FieldType ft = prefix.fieldType; - if (indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); - } else { - assertEquals(IndexOptions.DOCS, ft.indexOptions()); - } + assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); assertTrue(ft.storeTermVectorOffsets()); } @@ -712,11 +703,7 @@ public void testIndexPrefixIndexTypes() throws IOException { FieldMapper prefix = (FieldMapper) mapper.mappers().getMapper("field._index_prefix"); FieldType ft = prefix.fieldType; - if (indexService.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); - } else { - assertEquals(IndexOptions.DOCS, ft.indexOptions()); - } + assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, ft.indexOptions()); assertFalse(ft.storeTermVectorOffsets()); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index 4c59e25804a55..163c730294867 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -38,7 +38,6 @@ import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import 
org.elasticsearch.common.io.stream.StreamOutput; @@ -193,22 +192,13 @@ public void testToQueryInnerTermQuery() throws IOException { final QueryShardContext context = createShardContext(); { Query query = new SpanMultiTermQueryBuilder(new PrefixQueryBuilder(fieldName, "foo")).toQuery(context); - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_4_0)) { - assertThat(query, instanceOf(FieldMaskingSpanQuery.class)); - FieldMaskingSpanQuery fieldQuery = (FieldMaskingSpanQuery) query; - assertThat(fieldQuery.getMaskedQuery(), instanceOf(SpanTermQuery.class)); - assertThat(fieldQuery.getField(), equalTo("prefix_field")); - SpanTermQuery termQuery = (SpanTermQuery) fieldQuery.getMaskedQuery(); - assertThat(termQuery.getTerm().field(), equalTo("prefix_field._index_prefix")); - assertThat(termQuery.getTerm().text(), equalTo("foo")); - } else { - assertThat(query, instanceOf(SpanMultiTermQueryWrapper.class)); - SpanMultiTermQueryWrapper wrapper = (SpanMultiTermQueryWrapper) query; - assertThat(wrapper.getWrappedQuery(), instanceOf(PrefixQuery.class)); - PrefixQuery prefixQuery = (PrefixQuery) wrapper.getWrappedQuery(); - assertThat(prefixQuery.getField(), equalTo("prefix_field")); - assertThat(prefixQuery.getPrefix().text(), equalTo("foo")); - } + assertThat(query, instanceOf(FieldMaskingSpanQuery.class)); + FieldMaskingSpanQuery fieldQuery = (FieldMaskingSpanQuery) query; + assertThat(fieldQuery.getMaskedQuery(), instanceOf(SpanTermQuery.class)); + assertThat(fieldQuery.getField(), equalTo("prefix_field")); + SpanTermQuery termQuery = (SpanTermQuery) fieldQuery.getMaskedQuery(); + assertThat(termQuery.getTerm().field(), equalTo("prefix_field._index_prefix")); + assertThat(termQuery.getTerm().text(), equalTo("foo")); } { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index 4c8a558682b13..c39b37373ea13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -6,12 +6,9 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.rest.RestStatus; -import java.util.stream.StreamSupport; - public class LicenseUtils { public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; @@ -58,13 +55,6 @@ public static boolean signatureNeedsUpdate(License license, DiscoveryNodes curre public static int compatibleLicenseVersion(DiscoveryNodes currentNodes) { assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; - - if (StreamSupport.stream(currentNodes.spliterator(), false) - .allMatch(node -> node.getVersion().onOrAfter(Version.V_6_4_0))) { - // License.VERSION_CRYPTO_ALGORITHMS was introduced in 6.4.0 - return License.VERSION_CRYPTO_ALGORITHMS; - } else { - return License.VERSION_START_DATE; - } + return License.VERSION_CRYPTO_ALGORITHMS; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index 17de9dfc3522c..b71ca63e3218f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; @@ -185,9 +184,7 @@ public JobStats(StreamInput in) throws IOException { node = in.readOptionalWriteable(DiscoveryNode::new); assignmentExplanation = in.readOptionalString(); openTime = in.readOptionalTimeValue(); - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { - forecastStats = in.readOptionalWriteable(ForecastStats::new); - } + forecastStats = in.readOptionalWriteable(ForecastStats::new); } public String getJobId() { @@ -275,9 +272,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(node); out.writeOptionalString(assignmentExplanation); out.writeOptionalTimeValue(openTime); - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeOptionalWriteable(forecastStats); - } + out.writeOptionalWriteable(forecastStats); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java index 25cd0cffe7b39..8ee63f6c11ea2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -26,8 +25,6 @@ public class DetectionRule implements ToXContentObject, Writeable { - public static final Version VERSION_INTRODUCED = Version.V_6_4_0; - public static final ParseField DETECTION_RULE_FIELD = new ParseField("detection_rule"); public static final ParseField ACTIONS_FIELD = new ParseField("actions"); public static final ParseField SCOPE_FIELD = new ParseField("scope"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index b27149ef412a5..4903a1383bcdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -246,11 +246,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getVersion().onOrAfter(DetectionRule.VERSION_INTRODUCED)) { - out.writeList(rules); - } else { - out.writeList(Collections.emptyList()); - } + out.writeList(rules); out.writeInt(detectorIndex); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index f2be3315b4dc7..4c60a4795e050 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import 
org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -76,20 +75,14 @@ private MlFilter(String id, String description, SortedSet items) { public MlFilter(StreamInput in) throws IOException { id = in.readString(); - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { - description = in.readOptionalString(); - } else { - description = null; - } + description = in.readOptionalString(); items = new TreeSet<>(Arrays.asList(in.readStringArray())); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeOptionalString(description); - } + out.writeOptionalString(description); out.writeStringArray(items.toArray(new String[items.size()])); } @@ -201,4 +194,4 @@ public MlFilter build() { return new MlFilter(id, description, items); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java index 576bed5dcea2f..4b25b456c15b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,9 +77,7 @@ public CategoryDefinition(StreamInput in) throws IOException { regex = in.readString(); maxMatchingLength = in.readLong(); examples = new TreeSet<>(in.readStringList()); - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { - grokPattern = in.readOptionalString(); - } + grokPattern = in.readOptionalString(); } @Override @@ -91,9 +88,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(regex); out.writeLong(maxMatchingLength); out.writeStringCollection(examples); - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeOptionalString(grokPattern); - } + out.writeOptionalString(grokPattern); } public String getJobId() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index d2aea878b0f50..86d6fe3dfe59e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -45,7 +45,6 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; -import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; @@ -182,14 +181,6 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - if (jobHasRules(job) && node.getVersion().before(DetectionRule.VERSION_INTRODUCED)) { - String reason = "Not opening job [" + jobId + "] on node [" + 
nodeNameAndVersion(node) + "], because jobs using " + - "custom_rules require a node of version [" + DetectionRule.VERSION_INTRODUCED + "] or higher"; - logger.trace(reason); - reasons.add(reason); - continue; - } - long numberOfAssignedJobs = 0; int numberOfAllocatingJobs = 0; long assignedJobMemory = 0; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 5f1a4050d1f3e..a35b9d0968134 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -402,36 +402,6 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() assertNull(result.getExecutorNode()); } - public void testSelectLeastLoadedMlNode_jobWithRulesButNoNodeMeetsRequiredVersion() { - Map nodeAttr = new HashMap<>(); - nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); - nodeAttr.put(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000"); - Version version = Version.fromString("6.3.0"); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), - nodeAttr, Collections.emptySet(), version)) - .add(new DiscoveryNode("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), - nodeAttr, Collections.emptySet(), version)) - .build(); - - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - addJobTask("job_with_rules", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetaData tasks = tasksBuilder.build(); - - ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); - MetaData.Builder metaData = MetaData.builder(); - cs.nodes(nodes); - metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); - cs.metaData(metaData); - - Job job = jobWithRules("job_with_rules"); - Assignment result = TransportOpenJobAction.selectLeastLoadedMlNode("job_with_rules", job, cs.build(), 10, 2, 30, memoryTracker, - isMemoryTrackerRecentlyRefreshed, logger); - assertThat(result.getExplanation(), containsString( - "because jobs using custom_rules require a node of version [6.4.0] or higher")); - assertNull(result.getExecutorNode()); - } - public void testSelectLeastLoadedMlNode_jobWithRulesAndNodeMeetsRequiredVersion() { Map nodeAttr = new HashMap<>(); nodeAttr.put(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "10"); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index 3f49609953ea9..19f241440c438 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import 
org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; @@ -303,7 +304,7 @@ public void testJobAlreadyInMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = new HashMap<>(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.V_6_4_0); + m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomIndexCompatibleVersion(random())); m.put(RollupField.ROLLUP_META, Collections.singletonMap(job.getConfig().getId(), job.getConfig())); MappingMetaData meta = new MappingMetaData(RollupField.TYPE_NAME, @@ -344,7 +345,7 @@ public void testAddJobToMapping() { doAnswer(invocation -> { GetMappingsResponse response = mock(GetMappingsResponse.class); Map m = new HashMap<>(2); - m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, Version.V_6_4_0); + m.put(Rollup.ROLLUP_TEMPLATE_VERSION_FIELD, VersionUtils.randomIndexCompatibleVersion(random())); m.put(RollupField.ROLLUP_META, Collections.singletonMap(unrelatedJob.getId(), unrelatedJob)); MappingMetaData meta = new MappingMetaData(RollupField.TYPE_NAME, From 0db0e1330c55b6c5a4c529d6a4f6ecdca7dc4449 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 15:58:03 +0100 Subject: [PATCH 306/321] [ML Data Frame] Set DF task state when stopping (#42516) Set the state to stopped prior to persisting --- ...FrameTransformPersistentTasksExecutor.java | 13 +------------ .../transforms/DataFrameTransformTask.java | 19 +++++++------------ .../test/data_frame/transforms_start_stop.yml | 4 ++-- 3 files changed, 10 insertions(+), 26 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index 9ed8da61d8feb..443d499dfefd1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -30,7 +30,6 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.dataframe.DataFrame; @@ -223,18 +222,8 @@ private void startTask(DataFrameTransformTask buildTask, DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder, Long previousCheckpoint, ActionListener listener) { - // If we are stopped, and it is an initial run, this means we have never been started, - // attempt to start the task - buildTask.initializeIndexer(indexerBuilder); - // TODO isInitialRun is false after relocation?? - if (buildTask.getState().getTaskState().equals(DataFrameTransformTaskState.STOPPED) && buildTask.isInitialRun()) { - logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); - buildTask.start(previousCheckpoint, listener); - } else { - logger.debug("No need to start task. 
Its current state is: {}", buildTask.getState().getIndexerState()); - listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); - } + buildTask.start(previousCheckpoint, listener); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 926f233c454d1..13deab6748c94 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -174,13 +174,8 @@ public long getInProgressCheckpoint() { } } - public boolean isStopped() { - IndexerState currentState = getIndexer() == null ? initialIndexerState : getIndexer().getState(); - return currentState.equals(IndexerState.STOPPED); - } - - boolean isInitialRun() { - return getIndexer() != null && getIndexer().initialRun(); + public void setTaskStateStopped() { + taskState.set(DataFrameTransformTaskState.STOPPED); } /** @@ -235,11 +230,9 @@ public synchronized void start(Long startingCheckpoint, ActionListener public synchronized void stop() { if (getIndexer() == null) { - return; - } - // taskState is initialized as STOPPED and is updated in tandem with the indexerState - // Consequently, if it is STOPPED, we consider the whole task STOPPED. - if (taskState.get() == DataFrameTransformTaskState.STOPPED) { + // If there is no indexer the task has not been triggered + // but it still needs to be stopped and removed + shutdown(); return; } @@ -609,6 +602,8 @@ protected void onFinish(ActionListener listener) { protected void onStop() { auditor.info(transformConfig.getId(), "Indexer has stopped"); logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); + + transformTask.setTaskStateStopped(); transformsConfigManager.putOrUpdateTransformStats( new DataFrameTransformStateAndStats(transformId, transformTask.getState(), getStats(), DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index a475c3ceadca6..4909761c5633b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -114,8 +114,8 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } -# - match: { transforms.0.state.indexer_state: "stopped" } -# - match: { transforms.0.state.task_state: "stopped" } + - match: { transforms.0.state.indexer_state: "stopped" } + - match: { transforms.0.state.task_state: "stopped" } - do: data_frame.start_data_frame_transform: From 905902c3257c0548a5ed268d5851d4af07a51540 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 28 May 2019 09:04:02 -0700 Subject: [PATCH 307/321] [DOCS] Reorg monitoring configuration for re-use (#42547) --- .../configuring-metricbeat.asciidoc | 121 ++++++++++-------- 1 file changed, 68 insertions(+), 53 deletions(-) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index df578e88da614..e337c5bf7d345 100644 --- 
a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -17,6 +17,8 @@ image::monitoring/images/metricbeat.png[Example monitoring architecture] To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. +//NOTE: The tagged regions are re-used in the Stack Overview. + . Enable the collection of monitoring data. Set `xpack.monitoring.collection.enabled` to `true` on each node in the production cluster. By default, it is is disabled (`false`). @@ -71,13 +73,13 @@ PUT _cluster/settings Leave `xpack.monitoring.enabled` set to its default value (`true`). -- -. On each {es} node in the production cluster: - -.. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}]. +. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}] on each +{es} node in the production cluster. -.. Enable the {es} module in {metricbeat}. + +. Enable the {es} {xpack} module in {metricbeat} on each {es} node. + + -- +// tag::enable-es-module[] For example, to enable the default configuration in the `modules.d` directory, run the following command: @@ -89,39 +91,57 @@ metricbeat modules enable elasticsearch-xpack For more information, see {metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and {metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module]. --- - -.. By default the module will collect {es} monitoring metrics from `http://localhost:9200`. -If the local {es} node has a different address, you must specify it via the `hosts` setting -in the `modules.d/elasticsearch-xpack.yml` file. -.. If Elastic {security-features} are enabled, you must also provide a user ID -and password so that {metricbeat} can collect metrics successfully. - -... Create a user on the production cluster that has the -{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. -Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. +// end::enable-es-module[] +-- -... Add the `username` and `password` settings to the {es} module configuration -file. +. Configure the {es} {xpack} module in {metricbeat} on each {es} node. + + -- -For example, add the following settings in the `modules.d/elasticsearch-xpack.yml` file: +// tag::configure-es-module[] +The `modules.d/elasticsearch-xpack.yml` file contains the following settings: [source,yaml] ---------------------------------- -- module: elasticsearch - ... - username: remote_monitoring_user - password: YOUR_PASSWORD + - module: elasticsearch + metricsets: + - ccr + - cluster_stats + - index + - index_recovery + - index_summary + - ml_job + - node_stats + - shard + period: 10s + hosts: ["http://localhost:9200"] + #username: "user" + #password: "secret" + xpack.enabled: true ---------------------------------- --- -.. If you configured {es} to use <>, -you must access it via HTTPS. For example, use a `hosts` setting like -`https://localhost:9200` in the `modules.d/elasticsearch-xpack.yml` file. +By default, the module collects {es} monitoring metrics from +`http://localhost:9200`. If that host and port number are not correct, you must +update the `hosts` setting. If you configured {es} to use encrypted +communications, you must access it via HTTPS. For example, use a `hosts` setting +like `https://localhost:9200`. +// end::configure-es-module[] -.. Identify where to send the monitoring data. 
+ +// tag::remote-monitoring-user[] +If Elastic {security-features} are enabled, you must also provide a user ID +and password so that {metricbeat} can collect metrics successfully: + +.. Create a user on the production cluster that has the +{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. +Alternatively, use the +{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. + +.. Add the `username` and `password` settings to the {es} module configuration +file. +// end::remote-monitoring-user[] +-- + +. Identify where to send the monitoring data. + + -- TIP: In production environments, we strongly recommend using a separate cluster @@ -136,48 +156,43 @@ configuration file (`metricbeat.yml`): [source,yaml] ---------------------------------- output.elasticsearch: + # Array of hosts to connect to. hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1> + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" ---------------------------------- <1> In this example, the data is stored on a monitoring cluster with nodes `es-mon-1` and `es-mon-2`. +If you configured the monitoring cluster to use encrypted communications, you +must access it via HTTPS. For example, use a `hosts` setting like +`https://es-mon-1:9200`. + IMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one <>. -For more information about these configuration options, see -{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. --- - -.. If {es} {security-features} are enabled on the monitoring cluster, you -must provide a valid user ID and password so that {metricbeat} can send metrics -successfully. +If {es} {security-features} are enabled on the monitoring cluster, you must +provide a valid user ID and password so that {metricbeat} can send metrics +successfully: -... Create a user on the monitoring cluster that has the +.. Create a user on the monitoring cluster that has the {stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. -... Add the `username` and `password` settings to the {es} output information in -the {metricbeat} configuration file (`metricbeat.yml`): -+ --- -[source,yaml] ----------------------------------- -output.elasticsearch: - ... - username: remote_monitoring_user - password: YOUR_PASSWORD ----------------------------------- --- +.. Add the `username` and `password` settings to the {es} output information in +the {metricbeat} configuration file. -.. If you configured the monitoring cluster to use -<>, you must access it via -HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200` in the -`metricbeat.yml` file. +For more information about these configuration options, see +{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. +-- -. <>. +. <> on each node. -. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}]. +. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}] on each node. . {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. 
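As a companion to the reorganized monitoring instructions above, the first step (enabling `xpack.monitoring.collection.enabled`) can also be done programmatically. The following is a minimal sketch using the Java high-level REST client; it is illustrative only and not part of the patch, and the host, port, and plain-HTTP connection are assumptions:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

public class EnableMonitoringCollection {
    public static void main(String[] args) throws Exception {
        // Assumed host and port of one production node; adjust as needed.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // Equivalent to the PUT _cluster/settings snippet in the docs:
            // persistently enable monitoring collection on the cluster.
            ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest()
                .persistentSettings(Settings.builder()
                    .put("xpack.monitoring.collection.enabled", true));
            client.cluster().putSettings(request, RequestOptions.DEFAULT);
        }
    }
}
----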
From 492efa7729fc826734807aefdd191a4b850f0f03 Mon Sep 17 00:00:00 2001
From: Przemyslaw Gomulka
Date: Tue, 28 May 2019 18:37:17 +0200
Subject: [PATCH 308/321] Remove suppressions for "unchecked" for hamcrest
 varargs methods (#41528)

In hamcrest 2.1 warnings for unchecked varargs were fixed by hamcrest using
@SafeVarargs for those matchers where this warning occurred. This PR aims to
remove these annotations where Matchers.contains, Matchers.containsInAnyOrder
or Matchers.hasItems were used.
---
 .../java/org/elasticsearch/client/BulkProcessorIT.java | 2 --
 .../client/BulkRequestWithGlobalParametersIT.java      | 5 -----
 .../elasticsearch/common/logging/JsonLoggerTests.java  | 2 --
 .../xpack/restart/FullClusterRestartIT.java            | 1 -
 .../security/authc/ldap/SearchGroupsResolverTests.java | 1 -
 .../authc/ldap/ActiveDirectoryGroupsResolverTests.java | 1 -
 .../authc/ldap/ActiveDirectorySessionFactoryTests.java | 9 ---------
 .../authc/ldap/UserAttributeGroupsResolverTests.java   | 3 ---
 8 files changed, 24 deletions(-)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java
index 762e927551b8b..2aa9457bcd897 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java
@@ -291,7 +291,6 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception
         assertMultiGetResponse(highLevelClient().mget(multiGetRequest, RequestOptions.DEFAULT), testDocs);
     }
 
-    @SuppressWarnings("unchecked")
     public void testGlobalParametersAndSingleRequest() throws Exception {
         createIndexWithMultipleShards("test");
 
@@ -326,7 +325,6 @@ public void testGlobalParametersAndSingleRequest() throws Exception {
         assertThat(blogs, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ"))));
     }
 
-    @SuppressWarnings("unchecked")
     public void testGlobalParametersAndBulkProcessor() throws Exception {
         createIndexWithMultipleShards("test");
 
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java
index 3020eb0329b5c..dc49e6f88a6e4 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java
@@ -44,7 +44,6 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTestCase {
 
-    @SuppressWarnings("unchecked")
     public void testGlobalPipelineOnBulkRequest() throws IOException {
         createFieldAddingPipleine("xyz", "fieldNameXYZ", "valueXYZ");
 
@@ -83,7 +82,6 @@ public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException {
         assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldXYZ"), nullValue())));
     }
 
-    @SuppressWarnings("unchecked")
     public void testMixPipelineOnRequestAndGlobal() throws IOException {
         createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ");
         createFieldAddingPipleine("perIndexId", "someNewField", "someValue");
 
@@ -153,7 +151,6 @@ public void testGlobalType() throws IOException {
         assertThat(hits, everyItem(hasType("global_type")));
     }
 
-    @SuppressWarnings("unchecked")
     public void testTypeGlobalAndPerRequest() throws IOException {
         BulkRequest request = new BulkRequest(null, "global_type");
         request.add(new
IndexRequest("index1", "local_type", "1") @@ -171,7 +168,6 @@ public void testTypeGlobalAndPerRequest() throws IOException { .and(hasType("global_type")))); } - @SuppressWarnings("unchecked") public void testGlobalRouting() throws IOException { createIndexWithMultipleShards("index"); BulkRequest request = new BulkRequest(null); @@ -189,7 +185,6 @@ public void testGlobalRouting() throws IOException { assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); } - @SuppressWarnings("unchecked") public void testMixLocalAndGlobalRouting() throws IOException { BulkRequest request = new BulkRequest(null); request.routing("globalRouting"); diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index bbb20737c4708..b62e1a78e82ca 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -67,7 +67,6 @@ public void tearDown() throws Exception { super.tearDown(); } - @SuppressWarnings("unchecked") public void testJsonLayout() throws IOException { final Logger testLogger = LogManager.getLogger("test"); @@ -90,7 +89,6 @@ public void testJsonLayout() throws IOException { } } - @SuppressWarnings("unchecked") public void testPrefixLoggerInJson() throws IOException { Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); shardIdLogger.info("This is an info message with a shardId"); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 3ac0b20f95d0f..aa0788bd2b426 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -277,7 +277,6 @@ private String loadWatch(String watch) throws IOException { return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch); } - @SuppressWarnings("unchecked") private void assertOldTemplatesAreDeleted() throws IOException { Map templates = entityAsMap(client().performRequest(new Request("GET", "/_template"))); assertThat(templates.keySet(), not(hasItems(is("watches"), startsWith("watch-history"), is("triggered_watches")))); diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java index 036cf8ad0db33..f24bcface06bb 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java @@ -23,7 +23,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@SuppressWarnings("unchecked") public class SearchGroupsResolverTests extends GroupsResolverTestCase { private static final String BRUCE_BANNER_DN = "uid=hulk,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java 
b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java index 1a4fd0242dbaa..7fbbd217ae90b 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java @@ -35,7 +35,6 @@ public void setReferralFollowing() { } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35738") - @SuppressWarnings("unchecked") public void testResolveSubTree() throws Exception { Settings settings = Settings.builder() .put("xpack.security.authc.realms.active_directory.ad.group_search.scope", LdapSearchScope.SUB_TREE) diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java index 73e1df5dd08bd..3dc432b482bd6 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java @@ -66,7 +66,6 @@ public boolean enableWarningsCheck() { return false; } - @SuppressWarnings("unchecked") public void testAdAuth() throws Exception { RealmConfig config = configureRealm("ad-test", buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false)); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -101,7 +100,6 @@ private RealmConfig configureRealm(String name, Settings settings) { return new RealmConfig(identifier, mergedSettings, env, new ThreadContext(globalSettings)); } - @SuppressWarnings("unchecked") public void testNetbiosAuth() throws Exception { final String adUrl = randomFrom(AD_LDAP_URL, AD_LDAP_GC_URL); RealmConfig config = configureRealm("ad-test", buildAdSettings(adUrl, AD_DOMAIN, false)); @@ -139,7 +137,6 @@ public void testAdAuthAvengers() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticate() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); @@ -163,7 +160,6 @@ public void testAuthenticate() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticateBaseUserSearch() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.BASE, false); @@ -208,7 +204,6 @@ public void testAuthenticateBaseGroupSearch() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticateWithUserPrincipalName() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); @@ -229,7 +224,6 @@ public void testAuthenticateWithUserPrincipalName() throws Exception { } } - @SuppressWarnings("unchecked") public void testAuthenticateWithSAMAccountName() throws Exception { Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", 
LdapSearchScope.ONE_LEVEL, false); @@ -251,7 +245,6 @@ public void testAuthenticateWithSAMAccountName() throws Exception { } } - @SuppressWarnings("unchecked") public void testCustomUserFilter() throws Exception { Settings settings = Settings.builder() .put(buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", @@ -275,7 +268,6 @@ public void testCustomUserFilter() throws Exception { } - @SuppressWarnings("unchecked") public void testStandardLdapConnection() throws Exception { String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; @@ -341,7 +333,6 @@ public void testHandlingLdapReferralErrors() throws Exception { } } - @SuppressWarnings("unchecked") public void testStandardLdapWithAttributeGroups() throws Exception { String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; Settings settings = LdapTestCase.buildLdapSettings(new String[]{AD_LDAP_URL}, userTemplate, false); diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java index 24f0ecace67b9..38adbbe019048 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java @@ -29,7 +29,6 @@ public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase { public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "realm1"); - @SuppressWarnings("unchecked") public void testResolve() throws Exception { //falling back on the 'memberOf' attribute UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY)); @@ -42,7 +41,6 @@ public void testResolve() throws Exception { containsString("Philanthropists"))); } - @SuppressWarnings("unchecked") public void testResolveFromPreloadedAttributes() throws Exception { SearchRequest preSearch = new SearchRequest(BRUCE_BANNER_DN, SearchScope.BASE, LdapUtils.OBJECT_CLASS_PRESENCE_FILTER, "memberOf"); final Collection attributes = ldapConnection.searchForEntry(preSearch).getAttributes(); @@ -57,7 +55,6 @@ public void testResolveFromPreloadedAttributes() throws Exception { containsString("Philanthropists"))); } - @SuppressWarnings("unchecked") public void testResolveCustomGroupAttribute() throws Exception { Settings settings = Settings.builder() .put("user_group_attribute", "seeAlso") From f07b90f3c351fb352d2058229b50fbf94442c07d Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 28 May 2019 09:49:40 -0700 Subject: [PATCH 309/321] Remove support for chained multi-fields. (#42333) Follow-up to #41926, where we deprecated support for multi-fields within multi-fields. Addresses #41267. 
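Before the diff itself, it may help to see what a chained multi-field looks like next to its supported, flattened equivalent. The sketch below is an illustrative example in the same XContentBuilder style as the tests touched by this patch; the field names (`field`, `sub`, `raw`) are hypothetical:

[source,java]
----
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class FlattenedMultiFieldExample {
    public static void main(String[] args) throws Exception {
        // Rejected on 8.0+ indices: a multi-field ("raw") nested inside
        // another multi-field ("sub").
        XContentBuilder chained = XContentFactory.jsonBuilder().startObject()
            .startObject("properties").startObject("field").field("type", "text")
                .startObject("fields").startObject("sub").field("type", "keyword")
                    .startObject("fields").startObject("raw").field("type", "keyword")
                    .endObject().endObject()
                .endObject().endObject()
            .endObject().endObject().endObject();

        // Supported flattened form: both sub-fields hang directly off "field".
        XContentBuilder flattened = XContentFactory.jsonBuilder().startObject()
            .startObject("properties").startObject("field").field("type", "text")
                .startObject("fields")
                    .startObject("sub").field("type", "keyword").endObject()
                    .startObject("raw").field("type", "keyword").endObject()
                .endObject()
            .endObject().endObject().endObject();

        System.out.println(Strings.toString(chained));
        System.out.println(Strings.toString(flattened));
    }
}
----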
--- .../migration/migrate_8_0/mappings.asciidoc | 9 ++ .../index/mapper/TypeParsers.java | 18 ++-- .../mapper/ExternalFieldMapperTests.java | 89 ------------------- .../ExternalValuesMapperIntegrationIT.java | 10 +-- .../index/mapper/TypeParsersTests.java | 38 +++++--- 5 files changed, 52 insertions(+), 112 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/mappings.asciidoc b/docs/reference/migration/migrate_8_0/mappings.asciidoc index 371e9fc44c415..16e75473885c6 100644 --- a/docs/reference/migration/migrate_8_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_8_0/mappings.asciidoc @@ -14,3 +14,12 @@ The number of completion contexts within a single completion field has been limited to 10. + +[float] +==== Defining multi-fields within multi-fields + +Previously, it was possible to define a multi-field within a multi-field. +Defining chained multi-fields was deprecated in 7.3 and is now no longer +supported. To migrate the mappings, all instances of `fields` that occur within +a `fields` block should be removed, either by flattening the chained `fields` +blocks into a single level, or by switching to `copy_to` if appropriate. \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 9848a23cac11b..12c80361a855c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -219,11 +220,18 @@ public static boolean parseMultiField(FieldMapper.Builder builder, String name, String propName, Object propNode) { if (propName.equals("fields")) { if (parserContext.isWithinMultiField()) { - deprecationLogger.deprecatedAndMaybeLog("multifield_within_multifield", "At least one multi-field, [" + name + "], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); + // For indices created prior to 8.0, we only emit a deprecation warning and do not fail type parsing. This is to + // maintain the backwards-compatibility guarantee that we can always load indexes from the previous major version. + if (parserContext.indexVersionCreated().before(Version.V_8_0_0)) { + deprecationLogger.deprecatedAndMaybeLog("multifield_within_multifield", "At least one multi-field, [" + name + "], " + + "was encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated " + + "and is not supported for indices created in 8.0 and later. 
To migrate the mappings, all instances of [fields] " + + "that occur within a [fields] block should be removed from the mappings, either by flattening the chained " + + "[fields] blocks into a single level, or switching to [copy_to] if appropriate."); + } else { + throw new IllegalArgumentException("Encountered a multi-field [" + name + "] which itself contains a multi-field. " + + "Defining chained multi-fields is not supported."); + } } parserContext = parserContext.createMultiFieldContext(parserContext); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index e5d3040f7a3bc..5515603db3476 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -132,12 +131,6 @@ public void testExternalValuesWithMultifield() throws Exception { .startObject("field") .field("type", "text") .field("store", true) - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .field("store", true) - .endObject() - .endObject() .endObject() .endObject() .endObject() @@ -164,87 +157,5 @@ public void testExternalValuesWithMultifield() throws Exception { IndexableField field = doc.rootDoc().getField("field.field"); assertThat(field, notNullValue()); assertThat(field.stringValue(), is("foo")); - - IndexableField raw = doc.rootDoc().getField("field.field.raw"); - - assertThat(raw, notNullValue()); - assertThat(raw.binaryValue(), is(new BytesRef("foo"))); - - assertWarnings("At least one multi-field, [field], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. 
To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); - } - - public void testExternalValuesWithMultifieldTwoLevels() throws Exception { - IndexService indexService = createIndex("test"); - Map mapperParsers = new HashMap<>(); - mapperParsers.put(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")); - mapperParsers.put(ExternalMapperPlugin.EXTERNAL_BIS, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "bar")); - mapperParsers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser()); - MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER); - - Supplier queryShardContext = () -> { - return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }, null); - }; - DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.xContentRegistry(), indexService.similarityService(), mapperRegistry, queryShardContext); - - DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( - Strings - .toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("field") - .field("type", ExternalMapperPlugin.EXTERNAL) - .startObject("fields") - .startObject("field") - .field("type", "text") - .startObject("fields") - .startObject("generated") - .field("type", ExternalMapperPlugin.EXTERNAL_BIS) - .endObject() - .startObject("raw") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .startObject("raw") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject().endObject().endObject()))); - - ParsedDocument doc = documentMapper.parse(new SourceToParse("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "1234") - .endObject()), - XContentType.JSON)); - - assertThat(doc.rootDoc().getField("field.bool"), notNullValue()); - assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T")); - - assertThat(doc.rootDoc().getField("field.point"), notNullValue()); - - assertThat(doc.rootDoc().getField("field.shape"), notNullValue()); - - assertThat(doc.rootDoc().getField("field.field"), notNullValue()); - assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo")); - - assertThat(doc.rootDoc().getField("field.field.generated.generated"), notNullValue()); - assertThat(doc.rootDoc().getField("field.field.generated.generated").stringValue(), is("bar")); - - assertThat(doc.rootDoc().getField("field.field.raw"), notNullValue()); - assertThat(doc.rootDoc().getField("field.field.raw").stringValue(), is("foo")); - - assertThat(doc.rootDoc().getField("field.raw"), notNullValue()); - assertThat(doc.rootDoc().getField("field.raw").stringValue(), is("foo")); - - assertWarnings("At least one multi-field, [field], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. 
To resolve the issue, all instances of [fields] that occur within a [fields] block " + - "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " + - "switching to [copy_to] if appropriate."); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java index 6d47e4a784e06..7e7764d9514fe 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalValuesMapperIntegrationIT.java @@ -139,14 +139,8 @@ public void testExternalValuesWithMultifield() throws Exception { .field("type", ExternalMapperPlugin.EXTERNAL_UPPER) .startObject("fields") .startObject("g") - .field("type", "text") + .field("type", "keyword") .field("store", true) - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .field("store", true) - .endObject() - .endObject() .endObject() .endObject() .endObject() @@ -156,7 +150,7 @@ public void testExternalValuesWithMultifield() throws Exception { refresh(); SearchResponse response = client().prepareSearch("test-idx") - .setQuery(QueryBuilders.termQuery("f.g.raw", "FOO BAR")) + .setQuery(QueryBuilders.termQuery("f.g", "FOO BAR")) .execute().actionGet(); assertThat(response.getHits().getTotalHits().value, equalTo((long) 1)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 70f469b96370c..b0fbc3618ed2f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collections; @@ -48,6 +49,7 @@ import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME; +import static org.hamcrest.core.IsEqual.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -179,19 +181,35 @@ public void testMultiFieldWithinMultiField() throws IOException { .endObject() .endObject(); + Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); + + // For indices created prior to 8.0, we should only emit a warning and not fail parsing. Map fieldNode = XContentHelper.convertToMap( BytesReference.bytes(mapping), true, mapping.contentType()).v2(); - Mapper.TypeParser typeParser = new KeywordFieldMapper.TypeParser(); - Mapper.TypeParser.ParserContext parserContext = new Mapper.TypeParser.ParserContext("type", - null, null, type -> typeParser, Version.CURRENT, null); - - TypeParsers.parseField(builder, "some-field", fieldNode, parserContext); - assertWarnings("At least one multi-field, [sub-field], was " + - "encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated and will " + - "no longer be supported in 8.0. 
To resolve the issue, all instances of [fields] that occur within a [fields] block " +
-            "should be removed from the mappings, either by flattening the chained [fields] blocks into a single level, or " +
-            "switching to [copy_to] if appropriate.");
+        Version olderVersion = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0);
+        Mapper.TypeParser.ParserContext olderContext = new Mapper.TypeParser.ParserContext("type",
+            null, null, type -> typeParser, olderVersion, null);
+
+        TypeParsers.parseField(builder, "some-field", fieldNode, olderContext);
+        assertWarnings("At least one multi-field, [sub-field], " +
+            "was encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated " +
+            "and is not supported for indices created in 8.0 and later. To migrate the mappings, all instances of [fields] " +
+            "that occur within a [fields] block should be removed from the mappings, either by flattening the chained " +
+            "[fields] blocks into a single level, or switching to [copy_to] if appropriate.");
+
+        // For indices created in 8.0 or later, we should throw an error.
+        Map fieldNodeCopy = XContentHelper.convertToMap(
+            BytesReference.bytes(mapping), true, mapping.contentType()).v2();
+
+        Version version = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, Version.CURRENT);
+        Mapper.TypeParser.ParserContext context = new Mapper.TypeParser.ParserContext("type",
+            null, null, type -> typeParser, version, null);
+
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> TypeParsers.parseField(builder, "some-field", fieldNodeCopy, context));
+        assertThat(e.getMessage(), equalTo("Encountered a multi-field [sub-field] which itself contains a " +
+            "multi-field. Defining chained multi-fields is not supported."));
     }
 
     private Analyzer createAnalyzerWithMode(String name, AnalysisMode mode) {

From 7d2809597047cfb0cbda441070b513ae30d00405 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 28 May 2019 13:04:19 -0400
Subject: [PATCH 310/321] Lazily compute Java 8 home in reindex configuration
 (#42630)

In the reindex from old tests we require Java 8. Today when configuring the
reindex from old tests, we eagerly evaluate Java 8 home, which means that we
require JAVA8_HOME to be set even if the reindex from old test tasks are not
in the task graph. This is an onerous requirement if, for example, all that
you want to do is build a distribution. This commit addresses this by making
evaluation of Java 8 home lazy, so that it is only done and required if the
reindex from old test tasks would be executed.
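The one-line build.gradle change below replaces an eager method call with a lazy GString whose value is computed only when the environment variable is actually read. As a rough Java analogue of the same idea (an illustrative sketch, not code from this patch), compare an eager lookup with a Supplier-based lazy one:

[source,java]
----
import java.util.function.Supplier;

public class LazyJavaHomeSketch {

    // Stand-in for Gradle's getJavaHome(it, 8): fails when JAVA8_HOME is
    // unset, which is exactly the failure the patch defers.
    static String resolveJava8Home() {
        String home = System.getenv("JAVA8_HOME");
        if (home == null) {
            throw new IllegalStateException("JAVA8_HOME must be set");
        }
        return home;
    }

    public static void main(String[] args) {
        // Eager (old behaviour): resolved at configuration time, even if the
        // task never runs.
        // String javaHome = resolveJava8Home();

        // Lazy (new behaviour): nothing is resolved until get() is called.
        Supplier<String> javaHome = LazyJavaHomeSketch::resolveJava8Home;

        boolean taskWillRun = false; // true only when the task is in the task graph
        if (taskWillRun) {
            System.out.println("JAVA_HOME=" + javaHome.get());
        }
    }
}
----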
---
 modules/reindex/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle
index da184deedaa11..260c8dcc1df79 100644
--- a/modules/reindex/build.gradle
+++ b/modules/reindex/build.gradle
@@ -125,7 +125,7 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) {
     dependsOn unzip
     executable = new File(project.runtimeJavaHome, 'bin/java')
     env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }"
-    env 'JAVA_HOME', getJavaHome(it, 8)
+    env 'JAVA_HOME', "${ -> getJavaHome(it, 8)}"
     args 'oldes.OldElasticsearch',
          baseDir,
          unzip.temporaryDir,

From 6e39433cd5377536bda3e09168f1872660188ac0 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Tue, 28 May 2019 19:09:09 +0200
Subject: [PATCH 311/321] Remove "nodes/0" folder prefix from data path (#42489)

With the removal of node.max_local_storage_nodes, there is no longer any need
to keep the data in subfolders indexed by a node ordinal. This commit makes it
so that ES 8.0 will store data directly in $DATA_DIR instead of
$DATA_DIR/nodes/$nodeOrdinal. Upon startup, Elasticsearch will check to see if
there is data in the old location, and automatically move it to the new
location. This automatic migration only works if $nodeOrdinal is 0, i.e.,
multiple node instances have not previously run on the same data path, which
required node.max_local_storage_nodes to be explicitly configured.
---
 docs/reference/commands/shard-tool.asciidoc   |  10 +-
 .../migration/migrate_8_0/node.asciidoc       |  22 +++
 .../env/NodeEnvironmentEvilTests.java         |   9 +-
 .../elasticsearch/env/NodeEnvironment.java    | 170 +++++++++++++++---
 .../RemoveCorruptedShardDataCommand.java      |   5 +-
 .../RecoveryWithUnsupportedIndicesIT.java     |  17 +-
 .../elasticsearch/env/NodeEnvironmentIT.java  |  85 +++++++++
 .../env/NodeEnvironmentTests.java             |  12 +-
 .../index/shard/NewPathForShardTests.java     |   2 +-
 .../RemoveCorruptedShardDataCommandTests.java |   5 +-
 10 files changed, 285 insertions(+), 52 deletions(-)

diff --git a/docs/reference/commands/shard-tool.asciidoc b/docs/reference/commands/shard-tool.asciidoc
index 6fca1355a27be..c13c8d3db6a36 100644
--- a/docs/reference/commands/shard-tool.asciidoc
+++ b/docs/reference/commands/shard-tool.asciidoc
@@ -51,14 +51,14 @@ $ bin/elasticsearch-shard remove-corrupted-data --index twitter --shard-id 0
 
   Please make a complete backup of your index before using this tool.
 
-Opening Lucene index at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
+Opening Lucene index at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
 
- >> Lucene index is corrupted at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
+ >> Lucene index is corrupted at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/
 
-Opening translog at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
+Opening translog at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
 
- >> Translog is clean at /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
+ >> Translog is clean at /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/translog/
 
   Corrupted Lucene index segments found - 32 documents will be lost.
 
@@ -93,7 +93,7 @@ POST /_cluster/reroute
 
 You must accept the possibility of data loss by changing parameter
 `accept_data_loss` to `true`.
-Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/nodes/0/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ +Deleted corrupt marker corrupted_FzTSBSuxT7i3Tls_TgwEag from /var/lib/elasticsearchdata/indices/P45vf_YQRhqjfwLMUvSqDw/0/index/ -------------------------------------------------- diff --git a/docs/reference/migration/migrate_8_0/node.asciidoc b/docs/reference/migration/migrate_8_0/node.asciidoc index a1dcd654807e1..b1187e88b5d90 100644 --- a/docs/reference/migration/migrate_8_0/node.asciidoc +++ b/docs/reference/migration/migrate_8_0/node.asciidoc @@ -14,3 +14,25 @@ The `node.max_local_storage_nodes` setting was deprecated in 7.x and has been removed in 8.0. Nodes should be run on separate data paths to ensure that each node is consistently assigned to the same data path. + +[float] +==== Change of data folder layout + +Each node's data is now stored directly in the data directory set by the +`path.data` setting, rather than in `${path.data}/nodes/0`, because the removal +of the `node.max_local_storage_nodes` setting means that nodes may no longer +share a data path. At startup, Elasticsearch will automatically migrate the data +path to the new layout. This automatic migration will not proceed if the data +path contains data for more than one node. You should move to a configuration in +which each node has its own data path before upgrading. + +If you try to upgrade a configuration in which there is data for more than one +node in a data path then the automatic migration will fail and Elasticsearch +will refuse to start. To resolve this you will need to perform the migration +manually. The data for the extra nodes are stored in folders named +`${path.data}/nodes/1`, `${path.data}/nodes/2` and so on, and you should move +each of these folders to an appropriate location and then configure the +corresponding node to use this location for its data path. If your nodes each +have more than one data path in their `path.data` settings then you should move +all the corresponding subfolders in parallel. Each node uses the same subfolder +(e.g. `nodes/2`) across all its data paths. 
\ No newline at end of file diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java index 44d3c2a88a55b..49e30ac4b5ed3 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java @@ -51,10 +51,11 @@ public void testMissingWritePermission() throws IOException { Settings build = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); - IOException exception = expectThrows(IOException.class, () -> { + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); - assertTrue(exception.getMessage(), exception.getMessage().startsWith(path.toString())); + assertTrue(exception.getCause().getCause().getMessage(), + exception.getCause().getCause().getMessage().startsWith(path.toString())); } } @@ -62,7 +63,7 @@ public void testMissingWritePermissionOnIndex() throws IOException { assumeTrue("posix filesystem", isPosix); final String[] tempPaths = tmpPaths(); Path path = PathUtils.get(randomFrom(tempPaths)); - Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER) + Path fooIndex = path.resolve(NodeEnvironment.INDICES_FOLDER) .resolve("foo"); Files.createDirectories(fooIndex); try (PosixPermissionsResetter attr = new PosixPermissionsResetter(fooIndex)) { @@ -82,7 +83,7 @@ public void testMissingWritePermissionOnShard() throws IOException { assumeTrue("posix filesystem", isPosix); final String[] tempPaths = tmpPaths(); Path path = PathUtils.get(randomFrom(tempPaths)); - Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER) + Path fooIndex = path.resolve(NodeEnvironment.INDICES_FOLDER) .resolve("foo"); Path fooShard = fooIndex.resolve("0"); Path fooShardIndex = fooShard.resolve("index"); diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 4d19dd66732fc..75f39e70cfc7b 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; @@ -45,6 +46,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.gateway.MetaDataStateFormat; @@ -81,6 +83,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -90,9 +93,9 @@ */ public final class NodeEnvironment implements 
Closeable { public static class NodePath { - /* ${data.paths}/nodes/0 */ + /* ${data.paths} */ public final Path path; - /* ${data.paths}/nodes/0/indices */ + /* ${data.paths}/indices */ public final Path indicesPath; /** Cached FileStore from path */ public final FileStore fileStore; @@ -115,7 +118,7 @@ public NodePath(Path path) throws IOException { /** * Resolves the given shards directory against this NodePath - * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id} + * ${data.paths}/indices/{index.uuid}/{shard.id} */ public Path resolve(ShardId shardId) { return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id())); @@ -123,7 +126,7 @@ public Path resolve(ShardId shardId) { /** * Resolves index directory against this NodePath - * ${data.paths}/nodes/{node.id}/indices/{index.uuid} + * ${data.paths}/indices/{index.uuid} */ public Path resolve(Index index) { return resolve(index.getUUID()); @@ -170,7 +173,6 @@ public String toString() { public static final Setting ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope); - public static final String NODES_FOLDER = "nodes"; public static final String INDICES_FOLDER = "indices"; public static final String NODE_LOCK_FILENAME = "node.lock"; @@ -179,20 +181,28 @@ public static class NodeLock implements Releasable { private final Lock[] locks; private final NodePath[] nodePaths; + + public NodeLock(final Logger logger, + final Environment environment, + final CheckedFunction pathFunction) throws IOException { + this(logger, environment, pathFunction, Function.identity()); + } + /** * Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it * @param pathFunction function to check node path before attempt of acquiring a node lock */ public NodeLock(final Logger logger, final Environment environment, - final CheckedFunction pathFunction) throws IOException { + final CheckedFunction pathFunction, + final Function subPathMapping) throws IOException { nodePaths = new NodePath[environment.dataFiles().length]; locks = new Lock[nodePaths.length]; try { final Path[] dataPaths = environment.dataFiles(); for (int dirIndex = 0; dirIndex < dataPaths.length; dirIndex++) { Path dataDir = dataPaths[dirIndex]; - Path dir = resolveNodePath(dataDir); + Path dir = subPathMapping.apply(dataDir); if (pathFunction.apply(dir) == false) { continue; } @@ -247,7 +257,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce sharedDataPath = environment.sharedDataFile(); for (Path path : environment.dataFiles()) { - Files.createDirectories(resolveNodePath(path)); + Files.createDirectories(path); } final NodeLock nodeLock; @@ -264,7 +274,6 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce this.locks = nodeLock.locks; this.nodePaths = nodeLock.nodePaths; - this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); logger.debug("using node location {}", Arrays.toString(nodePaths)); @@ -278,6 +287,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce ensureAtomicMoveSupported(nodePaths); } + if (upgradeLegacyNodeFolders(logger, settings, environment, nodeLock)) { + assertCanWrite(); + } + if (DiscoveryNode.isDataNode(settings) == false) { if (DiscoveryNode.isMasterNode(settings) == false) { ensureNoIndexMetaData(nodePaths); @@ -286,6 +299,8 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce 
ensureNoShardData(nodePaths); } + this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths); + success = true; } finally { if (success == false) { @@ -295,13 +310,128 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } /** - * Resolve a specific nodes/{node.id} path for the specified path and node lock id. - * - * @param path the path - * @return the resolved path + * Upgrades all data paths that have been written to by an older ES version to the 8.0+ compatible folder layout, + * removing the "nodes/${lockId}" folder prefix */ - public static Path resolveNodePath(final Path path) { - return path.resolve(NODES_FOLDER).resolve("0"); + private static boolean upgradeLegacyNodeFolders(Logger logger, Settings settings, Environment environment, + NodeLock nodeLock) throws IOException { + boolean upgradeNeeded = false; + + // check if we can do an auto-upgrade + for (Path path : environment.dataFiles()) { + final Path nodesFolderPath = path.resolve("nodes"); + if (Files.isDirectory(nodesFolderPath)) { + final List nodeLockIds = new ArrayList<>(); + + try (DirectoryStream stream = Files.newDirectoryStream(nodesFolderPath)) { + for (Path nodeLockIdPath : stream) { + String fileName = nodeLockIdPath.getFileName().toString(); + if (Files.isDirectory(nodeLockIdPath) && fileName.chars().allMatch(Character::isDigit)) { + int nodeLockId = Integer.parseInt(fileName); + nodeLockIds.add(nodeLockId); + } else if (FileSystemUtils.isDesktopServicesStore(nodeLockIdPath) == false) { + throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " + + nodeLockIdPath); + } + } + } + + if (nodeLockIds.isEmpty() == false) { + upgradeNeeded = true; + + if (nodeLockIds.equals(Arrays.asList(0)) == false) { + throw new IllegalStateException("data path " + nodesFolderPath + " cannot be upgraded automatically because it " + + "contains data from nodes with ordinals " + nodeLockIds + ", due to previous use of the now obsolete " + + "[node.max_local_storage_nodes] setting. 
Please check the breaking changes docs for the current version of " + + "Elasticsearch to find an upgrade path"); + } + } + } + } + + if (upgradeNeeded == false) { + logger.trace("data folder upgrade not required"); + return false; + } + + logger.info("upgrading legacy data folders: {}", Arrays.toString(environment.dataFiles())); + + // acquire locks on legacy path for duration of upgrade (to ensure there is no older ES version running on this path) + final NodeLock legacyNodeLock; + try { + legacyNodeLock = new NodeLock(logger, environment, dir -> true, path -> path.resolve("nodes").resolve("0")); + } catch (IOException e) { + final String message = String.format( + Locale.ROOT, + "failed to obtain legacy node locks, tried %s;" + + " maybe these locations are not writable or multiple nodes were started on the same data path?", + Arrays.toString(environment.dataFiles())); + throw new IllegalStateException(message, e); + } + + // move contents from legacy path to new path + assert nodeLock.getNodePaths().length == legacyNodeLock.getNodePaths().length; + try { + final List> upgradeActions = new ArrayList<>(); + for (int i = 0; i < legacyNodeLock.getNodePaths().length; i++) { + final NodePath legacyNodePath = legacyNodeLock.getNodePaths()[i]; + final NodePath nodePath = nodeLock.getNodePaths()[i]; + + // determine folders to move and check that there are no extra files/folders + final Set folderNames = new HashSet<>(); + + try (DirectoryStream stream = Files.newDirectoryStream(legacyNodePath.path)) { + for (Path subFolderPath : stream) { + final String fileName = subFolderPath.getFileName().toString(); + if (FileSystemUtils.isDesktopServicesStore(subFolderPath)) { + // ignore + } else if (FileSystemUtils.isAccessibleDirectory(subFolderPath, logger)) { + if (fileName.equals(INDICES_FOLDER) == false && // indices folder + fileName.equals(MetaDataStateFormat.STATE_DIR_NAME) == false) { // global metadata & node state folder + throw new IllegalStateException("unexpected folder encountered during data folder upgrade: " + + subFolderPath); + } + final Path targetSubFolderPath = nodePath.path.resolve(fileName); + if (Files.exists(targetSubFolderPath)) { + throw new IllegalStateException("target folder already exists during data folder upgrade: " + + targetSubFolderPath); + } + folderNames.add(fileName); + } else if (fileName.equals(NODE_LOCK_FILENAME) == false && + fileName.equals(TEMP_FILE_NAME) == false) { + throw new IllegalStateException("unexpected file/folder encountered during data folder upgrade: " + + subFolderPath); + } + } + } + + assert Sets.difference(folderNames, Sets.newHashSet(INDICES_FOLDER, MetaDataStateFormat.STATE_DIR_NAME)).isEmpty() : + "expected indices and/or state dir folder but was " + folderNames; + + upgradeActions.add(() -> { + for (String folderName : folderNames) { + final Path sourceSubFolderPath = legacyNodePath.path.resolve(folderName); + final Path targetSubFolderPath = nodePath.path.resolve(folderName); + Files.move(sourceSubFolderPath, targetSubFolderPath, StandardCopyOption.ATOMIC_MOVE); + logger.info("data folder upgrade: moved from [{}] to [{}]", sourceSubFolderPath, targetSubFolderPath); + } + IOUtils.fsync(nodePath.path, true); + }); + } + // now do the actual upgrade. 
start by upgrading the node metadata file before moving anything, since a downgrade in an + // intermediate state would be pretty disastrous + loadOrCreateNodeMetaData(settings, logger, legacyNodeLock.getNodePaths()); + for (CheckedRunnable upgradeAction : upgradeActions) { + upgradeAction.run(); + } + } finally { + legacyNodeLock.close(); + } + + // upgrade successfully completed, remove legacy nodes folders + IOUtils.rm(Stream.of(environment.dataFiles()).map(path -> path.resolve("nodes")).toArray(Path[]::new)); + + return true; } private void maybeLogPathDetails() throws IOException { @@ -801,14 +931,14 @@ public Path[] availableShardPaths(ShardId shardId) { } /** - * Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder + * Returns all folder names in ${data.paths}/indices folder */ public Set availableIndexFolders() throws IOException { return availableIndexFolders(p -> false); } /** - * Returns folder names in ${data.paths}/nodes/{node.id}/indices folder that don't match the given predicate. + * Returns folder names in ${data.paths}/indices folder that don't match the given predicate. * @param excludeIndexPathIdsPredicate folder names to exclude */ public Set availableIndexFolders(Predicate excludeIndexPathIdsPredicate) throws IOException { @@ -825,7 +955,7 @@ public Set availableIndexFolders(Predicate excludeIndexPathIdsPr } /** - * Return all directory names in the nodes/{node.id}/indices directory for the given node path. + * Return all directory names in the indices directory for the given node path. * * @param nodePath the path * @return all directories that could be indices for the given node path. @@ -836,7 +966,7 @@ public Set availableIndexFoldersForPath(final NodePath nodePath) throws } /** - * Return directory names in the nodes/{node.id}/indices directory for the given node path that don't match the given predicate. + * Return directory names in the indices directory for the given node path that don't match the given predicate. 
* * @param nodePath the path * @param excludeIndexPathIdsPredicate folder names to exclude @@ -865,7 +995,7 @@ public Set availableIndexFoldersForPath(final NodePath nodePath, Predica } /** - * Resolves all existing paths to indexFolderName in ${data.paths}/nodes/{node.id}/indices + * Resolves all existing paths to indexFolderName in ${data.paths}/indices */ public Path[] resolveIndexFolder(String indexFolderName) { if (nodePaths == null || locks == null) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java index 16db596515b4c..5fc3ba57980bf 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -140,17 +140,14 @@ protected void findAndProcessShardPath(OptionSet options, Environment environmen IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, shardParent); final String shardIdFileName = path.getFileName().toString(); - final String nodeIdFileName = shardParentParent.getParent().getFileName().toString(); if (Files.isDirectory(path) && shardIdFileName.chars().allMatch(Character::isDigit) // SHARD-ID path element check && NodeEnvironment.INDICES_FOLDER.equals(shardParentParent.getFileName().toString()) // `indices` check - && nodeIdFileName.chars().allMatch(Character::isDigit) // NODE-ID check - && NodeEnvironment.NODES_FOLDER.equals(shardParentParent.getParent().getParent().getFileName().toString()) // `nodes` check ) { shardId = Integer.parseInt(shardIdFileName); indexName = indexMetaData.getIndex().getName(); } else { throw new ElasticsearchException("Unable to resolve shard id. 
Wrong folder structure at [ " + path.toString() - + " ], expected .../nodes/[NODE-ID]/indices/[INDEX-UUID]/[SHARD-ID]"); + + " ], expected .../indices/[INDEX-UUID]/[SHARD-ID]"); } } else { // otherwise resolve shardPath based on the index name and shard id diff --git a/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 53efeb393e4b4..720439768fabc 100644 --- a/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -18,6 +18,12 @@ */ package org.elasticsearch.bwcompat; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESIntegTestCase; + import java.io.IOException; import java.io.InputStream; import java.nio.file.DirectoryStream; @@ -26,13 +32,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.test.ESIntegTestCase; - import static org.hamcrest.Matchers.containsString; @LuceneTestCase.SuppressCodecs("*") @@ -69,8 +68,8 @@ protected Settings prepareBackwardsDataDir(Path backwardsIndex) throws IOExcepti } throw new IllegalStateException(builder.toString()); } - Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER); - Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER); + Path src = list[0].resolve("nodes"); + Path dest = dataDir.resolve("nodes"); assertTrue(Files.exists(src)); Files.move(src, dest); assertFalse(Files.exists(src)); diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 74de578426f2c..4d1848428e5a7 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -21,13 +21,24 @@ import org.elasticsearch.Version; import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.stream.Collectors; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; @@ -123,4 +134,78 @@ public void testFailsToStartIfUpgradedTooFar() { assertThat(illegalStateException.getMessage(), allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); } + + public void testUpgradeDataFolder() throws IOException, InterruptedException { + 
String node = internalCluster().startNode(); + prepareCreate("test").get(); + indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("{}", XContentType.JSON)); + String nodeId = client().admin().cluster().prepareState().get().getState().nodes().getMasterNodeId(); + + final Settings dataPathSettings = internalCluster().dataPathSettings(node); + internalCluster().stopRandomDataNode(); + + // simulate older data path layout by moving data under "nodes/0" folder + final List dataPaths = Environment.PATH_DATA_SETTING.get(dataPathSettings) + .stream().map(PathUtils::get).collect(Collectors.toList()); + dataPaths.forEach(path -> { + final Path targetPath = path.resolve("nodes").resolve("0"); + try { + Files.createDirectories(targetPath); + + try (DirectoryStream stream = Files.newDirectoryStream(path)) { + for (Path subPath : stream) { + String fileName = subPath.getFileName().toString(); + Path targetSubPath = targetPath.resolve(fileName); + if (fileName.equals("nodes") == false) { + Files.move(subPath, targetSubPath, StandardCopyOption.ATOMIC_MOVE); + } + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + + dataPaths.forEach(path -> assertTrue(Files.exists(path.resolve("nodes")))); + + // create extra file/folder, and check that upgrade fails + if (dataPaths.isEmpty() == false) { + final Path badFileInNodesDir = Files.createTempFile(randomFrom(dataPaths).resolve("nodes"), "bad", "file"); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade")); + Files.delete(badFileInNodesDir); + + final Path badFolderInNodesDir = Files.createDirectories(randomFrom(dataPaths).resolve("nodes").resolve("bad-folder")); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade")); + Files.delete(badFolderInNodesDir); + + final Path badFile = Files.createTempFile(randomFrom(dataPaths).resolve("nodes").resolve("0"), "bad", "file"); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected file/folder encountered during data folder upgrade")); + Files.delete(badFile); + + final Path badFolder = Files.createDirectories(randomFrom(dataPaths).resolve("nodes").resolve("0").resolve("bad-folder")); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("unexpected folder encountered during data folder upgrade")); + Files.delete(badFolder); + + final Path conflictingFolder = randomFrom(dataPaths).resolve("indices"); + if (Files.exists(conflictingFolder) == false) { + Files.createDirectories(conflictingFolder); + ise = expectThrows(IllegalStateException.class, () -> internalCluster().startNode(dataPathSettings)); + assertThat(ise.getMessage(), containsString("target folder already exists during data folder upgrade")); + Files.delete(conflictingFolder); + } + } + + // check that upgrade works + dataPaths.forEach(path -> assertTrue(Files.exists(path.resolve("nodes")))); + internalCluster().startNode(dataPathSettings); + dataPaths.forEach(path -> assertFalse(Files.exists(path.resolve("nodes")))); + assertEquals(nodeId, 
client().admin().cluster().prepareState().get().getState().nodes().getMasterNodeId()); + assertTrue(client().admin().indices().prepareExists("test").get().isExists()); + ensureYellow("test"); + assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); + } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index f21b55b9aee8f..5bb1152bcbe45 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -373,10 +373,10 @@ public void testCustomDataPaths() throws Exception { assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), - equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0"))); + equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID() + "/0"))); assertThat("index paths uses the regular template", - env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID()))); + env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID()))); IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build()); @@ -385,10 +385,10 @@ public void testCustomDataPaths() throws Exception { assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), - equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0"))); + equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID() + "/0"))); assertThat("index paths uses the regular template", - env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID()))); + env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "indices/" + index.getUUID()))); env.close(); } @@ -418,7 +418,7 @@ public void testExistingTempFiles() throws IOException { String[] paths = tmpPaths(); // simulate some previous left over temp files for (String path : randomSubsetOf(randomIntBetween(1, paths.length), paths)) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); + final Path nodePath = PathUtils.get(path); Files.createDirectories(nodePath); Files.createFile(nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME)); if (randomBoolean()) { @@ -433,7 +433,7 @@ public void testExistingTempFiles() throws IOException { // check we clean up for (String path: paths) { - final Path nodePath = NodeEnvironment.resolveNodePath(PathUtils.get(path)); + final Path nodePath = PathUtils.get(path); final Path tempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME); assertFalse(tempFile + " should have been cleaned", Files.exists(tempFile)); final Path srcTempFile = nodePath.resolve(NodeEnvironment.TEMP_FILE_NAME + ".src"); diff --git a/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java index 4e6e3036f4c40..73ae826d7211f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java @@ -90,7 +90,7 @@ static class MockUsableSpaceFileSystemProvider extends FilterFileSystemProvider @Override public FileStore getFileStore(Path path) throws IOException { - if (path.toString().contains(aPathPart)) { + if (path.toString().contains(aPathPart) || (path.toString() + 
path.getFileSystem().getSeparator()).contains(aPathPart)) { return aFileStore; } else { return bFileStore; diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index c7b1846356363..3291a250f5ccb 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -94,8 +94,7 @@ public void setup() throws IOException { .putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString()).build()); // create same directory structure as prod does - final Path path = NodeEnvironment.resolveNodePath(dataDir); - Files.createDirectories(path); + Files.createDirectories(dataDir); settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) @@ -103,7 +102,7 @@ public void setup() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(dataDir); shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); final IndexMetaData.Builder metaData = IndexMetaData.builder(routing.getIndexName()) .settings(settings) From 79a3de4152fa63707713356098b3a7849b5231da Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 14:20:42 -0400 Subject: [PATCH 312/321] [DOCS] Set explicit anchors for Asciidoctor (#42521) --- docs/reference/mapping/types/nested.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 5969dcfd6956b..fe150a69b4900 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -193,7 +193,7 @@ phase. 
Instead, highlighting needs to be performed via
 =============================================

-
+[[limit-number-nested-fields]]
 ==== Limiting the number of `nested` fields

 Indexing a document with 100 nested fields actually indexes 101 documents as each nested

From 8fae57b3816a1bce7a0365577a74bf1f20f543bf Mon Sep 17 00:00:00 2001
From: Jake Landis
Date: Tue, 28 May 2019 13:27:13 -0500
Subject: [PATCH 313/321] unmute 'Test url escaping with url mustache function'
 and bump logging (#42400)
---
 x-pack/qa/smoke-test-watcher/build.gradle                   | 1 +
 .../rest-api-spec/test/mustache/50_webhook_url_escaping.yml | 4 +---
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle
index 9194c46daed01..8de7448618ea1 100644
--- a/x-pack/qa/smoke-test-watcher/build.gradle
+++ b/x-pack/qa/smoke-test-watcher/build.gradle
@@ -12,6 +12,7 @@ integTestCluster {
   setting 'xpack.ml.enabled', 'false'
   setting 'xpack.license.self_generated.type', 'trial'
   setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG'
+  setting 'logger.org.elasticsearch.xpack.core.watcher', 'DEBUG'
 }

 integTestRunner {
diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml
index e11809a79baa5..bb06aca4f95a4 100644
--- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml
+++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml
@@ -1,8 +1,6 @@
 ---
 "Test url escaping with url mustache function":
-  - skip:
-      version: "all"
-      reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/41172"
+
   - do:
       cluster.health:
         wait_for_status: yellow

From 792435f160ac5fe8dd1218d98a0cb4f0a530030b Mon Sep 17 00:00:00 2001
From: Hendrik Muhs
Date: Tue, 28 May 2019 20:37:30 +0200
Subject: [PATCH 314/321] check position before and after latch (#42623)

Fixes #42084
---
 .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
index 95b3de5eb333e..fc86a9554880f 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
@@ -225,7 +225,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         }
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42084")
     public void testStateMachine() throws Exception {
         AtomicReference state = new AtomicReference<>(IndexerState.STOPPED);
         final ExecutorService executor = Executors.newFixedThreadPool(1);
@@ -236,10 +235,11 @@ public void testStateMachine() throws Exception {
             assertThat(indexer.getState(), equalTo(IndexerState.STARTED));
             assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()));
             assertThat(indexer.getState(), equalTo(IndexerState.INDEXING));
+            assertTrue(awaitBusy(() -> indexer.getPosition() == 2));
             countDownLatch.countDown();
-
-            assertThat(indexer.getPosition(), equalTo(2));
             assertTrue(awaitBusy(() -> isFinished.get()));
+
assertThat(indexer.getPosition(), equalTo(3)); + assertFalse(isStopped.get()); assertThat(indexer.getStep(), equalTo(6)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); From aad6cc42411ac475b94b00e9570f65660d68c6be Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 15:18:51 -0400 Subject: [PATCH 315/321] [DOCS] Fix X-Pack tag for Asciidoctor (#42443) --- docs/reference/ccr/apis/follow-request-body.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/reference/ccr/apis/follow-request-body.asciidoc b/docs/reference/ccr/apis/follow-request-body.asciidoc index e7e6ae2e26a05..d8fb725f02b14 100644 --- a/docs/reference/ccr/apis/follow-request-body.asciidoc +++ b/docs/reference/ccr/apis/follow-request-body.asciidoc @@ -1,4 +1,3 @@ -[role="xpack"] [testenv="platinum"] `max_read_request_operation_count`:: (integer) the maximum number of operations to pull per read from the remote From 7df025ce8619909b1e3c73f46c5e076a5e5e0be2 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 28 May 2019 22:11:34 +0200 Subject: [PATCH 316/321] fix javadoc of SearchRequestBuilder#setTrackTotalHits (#42219) --- .../elasticsearch/action/search/SearchRequestBuilder.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 96c93c974cabb..3e2d835a4b803 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -378,7 +378,9 @@ public SearchRequestBuilder setTrackScores(boolean trackScores) { } /** - * Indicates if the total hit count for the query should be tracked. Defaults to {@code true} + * Indicates if the total hit count for the query should be tracked. Requests will count total hit count accurately + * up to 10,000 by default, see {@link #setTrackTotalHitsUpTo(int)} to change this value or set to true/false to always/never + * count accurately. */ public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { sourceBuilder().trackTotalHits(trackTotalHits); @@ -386,7 +388,7 @@ public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { } /** - * Indicates if the total hit count for the query should be tracked. Defaults to {@code true} + * Indicates the total hit count that should be tracked accurately or null if the value is unset. Defaults to 10,000. 
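+     * For example, {@code setTrackTotalHitsUpTo(1000)} counts hits accurately up to 1,000 and reports the
+     * total as a lower bound beyond that threshold.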
*/ public SearchRequestBuilder setTrackTotalHitsUpTo(int trackTotalHitsUpTo) { sourceBuilder().trackTotalHitsUpTo(trackTotalHitsUpTo); From 4da6453673f28b4899f92bcc9867f153332154c9 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 21:41:15 +0100 Subject: [PATCH 317/321] [ML Data Frame] Mute stop start test Relates to https://github.com/elastic/elasticsearch/issues/42650 --- .../rest-api-spec/test/data_frame/transforms_start_stop.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 4909761c5633b..58af6e0899dda 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -90,6 +90,9 @@ teardown: - match: { airline-data-by-airline-start-stop.mappings: {} } --- "Test start/stop/start transform": + - skip: + reason: "https://github.com/elastic/elasticsearch/issues/42650" + version: "all" - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" From 0f5f8880c414febf5e1bc180aaed1b0b65a7e3ae Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 28 May 2019 23:13:53 +0200 Subject: [PATCH 318/321] Add 7.1.2 version constant. (#42643) Relates to #42635 --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 48d37957844e2..844b963e9aa83 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -74,6 +74,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_1_ID = 7010199; public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_2_ID = 7010299; + public static final Version V_7_1_2 = new Version(V_7_1_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_3_0_ID = 7030099; @@ -100,6 +102,8 @@ public static Version fromId(int id) { return V_7_3_0; case V_7_2_0_ID: return V_7_2_0; + case V_7_1_2_ID: + return V_7_1_2; case V_7_1_1_ID: return V_7_1_1; case V_7_1_0_ID: From aaf0ab42cb314b87782f85a313156e0be807838c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 23:22:52 +0200 Subject: [PATCH 319/321] Adjust use of Deprecated Netty API (#42613) * With the recent upgrade to Netty 4.1.36 this method became deprecated and I made the advised change to fix the deprecation --- .../elasticsearch/http/netty4/Netty4HttpServerTransport.java | 4 ++-- .../org/elasticsearch/transport/netty4/Netty4Transport.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 356cfa0bbf99d..8b31e0bcb28a2 100644 --- 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -23,8 +23,8 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.FixedRecvByteBufAllocator; @@ -351,7 +351,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E } @ChannelHandler.Sharable - private static class ServerChannelExceptionHandler extends ChannelHandlerAdapter { + private static class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter { private final Netty4HttpServerTransport transport; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 34fb2173143c8..f2871ff34e8b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -25,8 +25,8 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.FixedRecvByteBufAllocator; @@ -315,7 +315,7 @@ private void addClosedExceptionLogger(Channel channel) { } @ChannelHandler.Sharable - private class ServerChannelExceptionHandler extends ChannelHandlerAdapter { + private class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { From a1e78585d18fe07cce52950cd39f9992da3c126b Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 28 May 2019 17:40:07 -0700 Subject: [PATCH 320/321] Fix a callout in the field alias docs. 
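For context on the Netty adjustment in the patch above: an exception-only handler such as ServerChannelExceptionHandler now extends ChannelInboundHandlerAdapter, which (unlike the deprecated ChannelHandlerAdapter usage) still receives inbound exceptionCaught events on recent Netty 4.1 releases. A minimal self-contained sketch of the pattern follows; the class name and the simple close-on-error policy are illustrative, not Elasticsearch's real error handling:

import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Sharable inbound handler that only reacts to pipeline exceptions.
@ChannelHandler.Sharable
final class ServerExceptionHandlerSketch extends ChannelInboundHandlerAdapter {

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        // A real transport would log the failure and notify its lifecycle
        // listeners; closing the channel is the simplest safe reaction here.
        ctx.close();
    }
}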
--- docs/reference/mapping/types/alias.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index a7bba54df56e4..c70d96a7e572d 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -16,7 +16,7 @@ PUT trips }, "route_length_miles": { "type": "alias", - "path": "distance" // <1> + "path": "distance" <1> }, "transit_mode": { "type": "keyword" From 813e57d2d8e66e51f423e77b178e05316335391e Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 28 May 2019 17:52:35 -0700 Subject: [PATCH 321/321] Add explicit build flag for experimenting with test execution cacheability (#42649) * Add build flag for ignoring random test seed as task input * Fix checkstyle violations --- .../elasticsearch/gradle/BuildPlugin.groovy | 31 +++++------ ...emPropertyCommandLineArgumentProvider.java | 30 +++++++++++ .../testfixtures/TestFixturesPlugin.java | 53 +++++++++++-------- 3 files changed, 72 insertions(+), 42 deletions(-) create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index b5c69a418cceb..92d11a8477436 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -817,7 +817,7 @@ class BuildPlugin implements Plugin { } test.jvmArgumentProviders.add(nonInputProperties) - test.extensions.getByType(ExtraPropertiesExtension).set('nonInputProperties', nonInputProperties) + test.extensions.add('nonInputProperties', nonInputProperties) test.executable = "${ext.get('runtimeJavaHome')}/bin/java" test.workingDir = project.file("${project.buildDir}/testrun/${test.name}") @@ -842,17 +842,25 @@ class BuildPlugin implements Plugin { // we use './temp' since this is per JVM and tests are forbidden from writing to CWD test.systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, - 'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar", - 'gradle.user.home': project.gradle.getGradleUserHomeDir(), 'java.io.tmpdir': './temp', 'java.awt.headless': 'true', 'tests.gradle': 'true', 'tests.artifact': project.name, 'tests.task': test.path, 'tests.security.manager': 'true', - 'tests.seed': project.property('testSeed'), 'jna.nosys': 'true' + // ignore changing test seed when build is passed -Dignore.tests.seed for cacheability experimentation + if (System.getProperty('ignore.tests.seed') != null) { + nonInputProperties.systemProperty('tests.seed', project.property('testSeed')) + } else { + test.systemProperty('tests.seed', project.property('testSeed')) + } + + // don't track these as inputs since they contain absolute paths and break cache relocatability + nonInputProperties.systemProperty('gradle.worker.jar', "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar") + nonInputProperties.systemProperty('gradle.user.home', project.gradle.getGradleUserHomeDir()) + nonInputProperties.systemProperty('compiler.java', "${-> (ext.get('compilerJavaVersion') as JavaVersion).getMajorVersion()}") // TODO: remove setting logging level via system property @@ -965,19 +973,4 @@ class BuildPlugin implements Plugin { }) } } 
- - private static class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider { - private final Map systemProperties = [:] - - void systemProperty(String key, Object value) { - systemProperties.put(key, value) - } - - @Override - Iterable asArguments() { - return systemProperties.collect { key, value -> - "-D${key}=${value.toString()}".toString() - } - } - } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java b/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java new file mode 100644 index 0000000000000..7e808724035df --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/SystemPropertyCommandLineArgumentProvider.java @@ -0,0 +1,30 @@ +package org.elasticsearch.gradle; + +import org.gradle.api.tasks.Input; +import org.gradle.process.CommandLineArgumentProvider; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.stream.Collectors; + +public class SystemPropertyCommandLineArgumentProvider implements CommandLineArgumentProvider { + private final Map systemProperties = new LinkedHashMap<>(); + + public void systemProperty(String key, Object value) { + systemProperties.put(key, value); + } + + @Override + public Iterable asArguments() { + return systemProperties.entrySet() + .stream() + .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) + .collect(Collectors.toList()); + } + + // Track system property keys as an input so our build cache key will change if we add properties but values are still ignored + @Input + public Iterable getPropertyNames() { + return systemProperties.keySet(); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index b930955236fb8..0313123655afd 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -22,9 +22,11 @@ import com.avast.gradle.dockercompose.DockerComposePlugin; import com.avast.gradle.dockercompose.tasks.ComposeUp; import org.elasticsearch.gradle.OS; +import org.elasticsearch.gradle.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.precommit.JarHellTask; import org.elasticsearch.gradle.precommit.TestingConventionsTasks; import org.elasticsearch.gradle.precommit.ThirdPartyAuditTask; +import org.gradle.api.Action; import org.gradle.api.DefaultTask; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -122,7 +124,8 @@ public void apply(Project project) { configureServiceInfoForTask( task, fixtureProject, - task::systemProperty + (name, host) -> + task.getExtensions().getByType(SystemPropertyCommandLineArgumentProvider.class).systemProperty(name, host) ); task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); }) @@ -143,28 +146,32 @@ private void conditionTaskByType(TaskContainer tasks, TestFixtureExtension exten private void configureServiceInfoForTask(Task task, Project fixtureProject, BiConsumer consumer) { // Configure ports for the tests as system properties. // We only know these at execution time so we need to do it in doFirst - task.doFirst(theTask -> - fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos() - .forEach((service, infos) -> { - infos.getTcpPorts() - .forEach((container, host) -> { - String name = "test.fixtures." 
+ service + ".tcp." + container; - theTask.getLogger().info("port mapping property: {}={}", name, host); - consumer.accept( - name, - host - ); - }); - infos.getUdpPorts() - .forEach((container, host) -> { - String name = "test.fixtures." + service + ".udp." + container; - theTask.getLogger().info("port mapping property: {}={}", name, host); - consumer.accept( - name, - host - ); - }); - }) + task.doFirst(new Action() { + @Override + public void execute(Task theTask) { + fixtureProject.getExtensions().getByType(ComposeExtension.class).getServicesInfos() + .forEach((service, infos) -> { + infos.getTcpPorts() + .forEach((container, host) -> { + String name = "test.fixtures." + service + ".tcp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept( + name, + host + ); + }); + infos.getUdpPorts() + .forEach((container, host) -> { + String name = "test.fixtures." + service + ".udp." + container; + theTask.getLogger().info("port mapping property: {}={}", name, host); + consumer.accept( + name, + host + ); + }); + }); + } + } ); }